content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.extensions import ExtensionArray\nfrom pandas.core.internals.blocks import EABackedBlock\n\n\nclass BaseConstructorsTests:\n def test_from_sequence_from_cls(self, data):\n result = type(data)._from_sequence(data, dtype=data.dtype)\n tm.assert_extension_array_equal(result, data)\n\n data = data[:0]\n result = type(data)._from_sequence(data, dtype=data.dtype)\n tm.assert_extension_array_equal(result, data)\n\n def test_array_from_scalars(self, data):\n scalars = [data[0], data[1], data[2]]\n result = data._from_sequence(scalars, dtype=data.dtype)\n assert isinstance(result, type(data))\n\n def test_series_constructor(self, data):\n result = pd.Series(data, copy=False)\n assert result.dtype == data.dtype\n assert len(result) == len(data)\n if hasattr(result._mgr, "blocks"):\n assert isinstance(result._mgr.blocks[0], EABackedBlock)\n assert result._mgr.array is data\n\n # Series[EA] is unboxed / boxed correctly\n result2 = pd.Series(result)\n assert result2.dtype == data.dtype\n if hasattr(result._mgr, "blocks"):\n assert isinstance(result2._mgr.blocks[0], EABackedBlock)\n\n def test_series_constructor_no_data_with_index(self, dtype, na_value):\n result = pd.Series(index=[1, 2, 3], dtype=dtype)\n expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n # GH 33559 - empty index\n result = pd.Series(index=[], dtype=dtype)\n expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n def test_series_constructor_scalar_na_with_index(self, dtype, na_value):\n result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)\n expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n def test_series_constructor_scalar_with_index(self, data, dtype):\n scalar = data[0]\n result = pd.Series(scalar, index=[1, 2, 
3], dtype=dtype)\n expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n result = pd.Series(scalar, index=["foo"], dtype=dtype)\n expected = pd.Series([scalar], index=["foo"], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("from_series", [True, False])\n def test_dataframe_constructor_from_dict(self, data, from_series):\n if from_series:\n data = pd.Series(data)\n result = pd.DataFrame({"A": data})\n assert result.dtypes["A"] == data.dtype\n assert result.shape == (len(data), 1)\n if hasattr(result._mgr, "blocks"):\n assert isinstance(result._mgr.blocks[0], EABackedBlock)\n assert isinstance(result._mgr.arrays[0], ExtensionArray)\n\n def test_dataframe_from_series(self, data):\n result = pd.DataFrame(pd.Series(data))\n assert result.dtypes[0] == data.dtype\n assert result.shape == (len(data), 1)\n if hasattr(result._mgr, "blocks"):\n assert isinstance(result._mgr.blocks[0], EABackedBlock)\n assert isinstance(result._mgr.arrays[0], ExtensionArray)\n\n def test_series_given_mismatched_index_raises(self, data):\n msg = r"Length of values \(3\) does not match length of index \(5\)"\n with pytest.raises(ValueError, match=msg):\n pd.Series(data[:3], index=[0, 1, 2, 3, 4])\n\n def test_from_dtype(self, data):\n # construct from our dtype & string dtype\n dtype = data.dtype\n\n expected = pd.Series(data)\n result = pd.Series(list(data), dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n result = pd.Series(list(data), dtype=str(dtype))\n tm.assert_series_equal(result, expected)\n\n # gh-30280\n\n expected = pd.DataFrame(data).astype(dtype)\n result = pd.DataFrame(list(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n result = pd.DataFrame(list(data), dtype=str(dtype))\n tm.assert_frame_equal(result, expected)\n\n def test_pandas_array(self, data):\n # pd.array(extension_array) should be idempotent...\n result = pd.array(data)\n 
tm.assert_extension_array_equal(result, data)\n\n def test_pandas_array_dtype(self, data):\n # ... but specifying dtype will override idempotency\n result = pd.array(data, dtype=np.dtype(object))\n expected = pd.arrays.NumpyExtensionArray(np.asarray(data, dtype=object))\n tm.assert_equal(result, expected)\n\n def test_construct_empty_dataframe(self, dtype):\n # GH 33623\n result = pd.DataFrame(columns=["a"], dtype=dtype)\n expected = pd.DataFrame(\n {"a": pd.array([], dtype=dtype)}, index=pd.RangeIndex(0)\n )\n tm.assert_frame_equal(result, expected)\n\n def test_empty(self, dtype):\n cls = dtype.construct_array_type()\n result = cls._empty((4,), dtype=dtype)\n assert isinstance(result, cls)\n assert result.dtype == dtype\n assert result.shape == (4,)\n\n # GH#19600 method on ExtensionDtype\n result2 = dtype.empty((4,))\n assert isinstance(result2, cls)\n assert result2.dtype == dtype\n assert result2.shape == (4,)\n\n result2 = dtype.empty(4)\n assert isinstance(result2, cls)\n assert result2.dtype == dtype\n assert result2.shape == (4,)\n
.venv\Lib\site-packages\pandas\tests\extension\base\constructors.py
constructors.py
Python
5,609
0.95
0.140845
0.069565
vue-tools
627
2024-07-25T13:41:38.937437
BSD-3-Clause
true
abca279b5e2a5052c265445398ff713b
"""\nTests for 2D compatibility.\n"""\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.missing import is_matching_na\n\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_integer_dtype,\n)\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE\n\n\nclass Dim2CompatTests:\n # Note: these are ONLY for ExtensionArray subclasses that support 2D arrays.\n # i.e. not for pyarrow-backed EAs.\n\n @pytest.fixture(autouse=True)\n def skip_if_doesnt_support_2d(self, dtype, request):\n if not dtype._supports_2d:\n node = request.node\n # In cases where we are mixed in to ExtensionTests, we only want to\n # skip tests that are defined in Dim2CompatTests\n test_func = node._obj\n if test_func.__qualname__.startswith("Dim2CompatTests"):\n # TODO: is there a less hacky way of checking this?\n pytest.skip(f"{dtype} does not support 2D.")\n\n def test_transpose(self, data):\n arr2d = data.repeat(2).reshape(-1, 2)\n shape = arr2d.shape\n assert shape[0] != shape[-1] # otherwise the rest of the test is useless\n\n assert arr2d.T.shape == shape[::-1]\n\n def test_frame_from_2d_array(self, data):\n arr2d = data.repeat(2).reshape(-1, 2)\n\n df = pd.DataFrame(arr2d)\n expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})\n tm.assert_frame_equal(df, expected)\n\n def test_swapaxes(self, data):\n arr2d = data.repeat(2).reshape(-1, 2)\n\n result = arr2d.swapaxes(0, 1)\n expected = arr2d.T\n tm.assert_extension_array_equal(result, expected)\n\n def test_delete_2d(self, data):\n arr2d = data.repeat(3).reshape(-1, 3)\n\n # axis = 0\n result = arr2d.delete(1, axis=0)\n expected = data.delete(1).repeat(3).reshape(-1, 3)\n tm.assert_extension_array_equal(result, expected)\n\n # axis = 1\n result = arr2d.delete(1, axis=1)\n expected = data.repeat(2).reshape(-1, 2)\n tm.assert_extension_array_equal(result, expected)\n\n def test_take_2d(self, data):\n arr2d = data.reshape(-1, 1)\n\n result = arr2d.take([0, 0, -1], 
axis=0)\n\n expected = data.take([0, 0, -1]).reshape(-1, 1)\n tm.assert_extension_array_equal(result, expected)\n\n def test_repr_2d(self, data):\n # this could fail in a corner case where an element contained the name\n res = repr(data.reshape(1, -1))\n assert res.count(f"<{type(data).__name__}") == 1\n\n res = repr(data.reshape(-1, 1))\n assert res.count(f"<{type(data).__name__}") == 1\n\n def test_reshape(self, data):\n arr2d = data.reshape(-1, 1)\n assert arr2d.shape == (data.size, 1)\n assert len(arr2d) == len(data)\n\n arr2d = data.reshape((-1, 1))\n assert arr2d.shape == (data.size, 1)\n assert len(arr2d) == len(data)\n\n with pytest.raises(ValueError):\n data.reshape((data.size, 2))\n with pytest.raises(ValueError):\n data.reshape(data.size, 2)\n\n def test_getitem_2d(self, data):\n arr2d = data.reshape(1, -1)\n\n result = arr2d[0]\n tm.assert_extension_array_equal(result, data)\n\n with pytest.raises(IndexError):\n arr2d[1]\n\n with pytest.raises(IndexError):\n arr2d[-2]\n\n result = arr2d[:]\n tm.assert_extension_array_equal(result, arr2d)\n\n result = arr2d[:, :]\n tm.assert_extension_array_equal(result, arr2d)\n\n result = arr2d[:, 0]\n expected = data[[0]]\n tm.assert_extension_array_equal(result, expected)\n\n # dimension-expanding getitem on 1D\n result = data[:, np.newaxis]\n tm.assert_extension_array_equal(result, arr2d.T)\n\n def test_iter_2d(self, data):\n arr2d = data.reshape(1, -1)\n\n objs = list(iter(arr2d))\n assert len(objs) == arr2d.shape[0]\n\n for obj in objs:\n assert isinstance(obj, type(data))\n assert obj.dtype == data.dtype\n assert obj.ndim == 1\n assert len(obj) == arr2d.shape[1]\n\n def test_tolist_2d(self, data):\n arr2d = data.reshape(1, -1)\n\n result = arr2d.tolist()\n expected = [data.tolist()]\n\n assert isinstance(result, list)\n assert all(isinstance(x, list) for x in result)\n\n assert result == expected\n\n def test_concat_2d(self, data):\n left = type(data)._concat_same_type([data, data]).reshape(-1, 2)\n right = 
left.copy()\n\n # axis=0\n result = left._concat_same_type([left, right], axis=0)\n expected = data._concat_same_type([data] * 4).reshape(-1, 2)\n tm.assert_extension_array_equal(result, expected)\n\n # axis=1\n result = left._concat_same_type([left, right], axis=1)\n assert result.shape == (len(data), 4)\n tm.assert_extension_array_equal(result[:, :2], left)\n tm.assert_extension_array_equal(result[:, 2:], right)\n\n # axis > 1 -> invalid\n msg = "axis 2 is out of bounds for array of dimension 2"\n with pytest.raises(ValueError, match=msg):\n left._concat_same_type([left, right], axis=2)\n\n @pytest.mark.parametrize("method", ["backfill", "pad"])\n def test_fillna_2d_method(self, data_missing, method):\n # pad_or_backfill is always along axis=0\n arr = data_missing.repeat(2).reshape(2, 2)\n assert arr[0].isna().all()\n assert not arr[1].isna().any()\n\n result = arr._pad_or_backfill(method=method, limit=None)\n\n expected = data_missing._pad_or_backfill(method=method).repeat(2).reshape(2, 2)\n tm.assert_extension_array_equal(result, expected)\n\n # Reverse so that backfill is not a no-op.\n arr2 = arr[::-1]\n assert not arr2[0].isna().any()\n assert arr2[1].isna().all()\n\n result2 = arr2._pad_or_backfill(method=method, limit=None)\n\n expected2 = (\n data_missing[::-1]._pad_or_backfill(method=method).repeat(2).reshape(2, 2)\n )\n tm.assert_extension_array_equal(result2, expected2)\n\n @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])\n def test_reductions_2d_axis_none(self, data, method):\n arr2d = data.reshape(1, -1)\n\n err_expected = None\n err_result = None\n try:\n expected = getattr(data, method)()\n except Exception as err:\n # if the 1D reduction is invalid, the 2D reduction should be as well\n err_expected = err\n try:\n result = getattr(arr2d, method)(axis=None)\n except Exception as err2:\n err_result = err2\n\n else:\n result = getattr(arr2d, method)(axis=None)\n\n if err_result is not None or err_expected is not 
None:\n assert type(err_result) == type(err_expected)\n return\n\n assert is_matching_na(result, expected) or result == expected\n\n @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])\n @pytest.mark.parametrize("min_count", [0, 1])\n def test_reductions_2d_axis0(self, data, method, min_count):\n if min_count == 1 and method not in ["sum", "prod"]:\n pytest.skip(f"min_count not relevant for {method}")\n\n arr2d = data.reshape(1, -1)\n\n kwargs = {}\n if method in ["std", "var"]:\n # pass ddof=0 so we get all-zero std instead of all-NA std\n kwargs["ddof"] = 0\n elif method in ["prod", "sum"]:\n kwargs["min_count"] = min_count\n\n try:\n result = getattr(arr2d, method)(axis=0, **kwargs)\n except Exception as err:\n try:\n getattr(data, method)()\n except Exception as err2:\n assert type(err) == type(err2)\n return\n else:\n raise AssertionError("Both reductions should raise or neither")\n\n def get_reduction_result_dtype(dtype):\n # windows and 32bit builds will in some cases have int32/uint32\n # where other builds will have int64/uint64.\n if dtype.itemsize == 8:\n return dtype\n elif dtype.kind in "ib":\n return NUMPY_INT_TO_DTYPE[np.dtype(int)]\n else:\n # i.e. 
dtype.kind == "u"\n return NUMPY_INT_TO_DTYPE[np.dtype("uint")]\n\n if method in ["sum", "prod"]:\n # std and var are not dtype-preserving\n expected = data\n if data.dtype.kind in "iub":\n dtype = get_reduction_result_dtype(data.dtype)\n expected = data.astype(dtype)\n assert dtype == expected.dtype\n\n if min_count == 0:\n fill_value = 1 if method == "prod" else 0\n expected = expected.fillna(fill_value)\n\n tm.assert_extension_array_equal(result, expected)\n elif method == "median":\n # std and var are not dtype-preserving\n expected = data\n tm.assert_extension_array_equal(result, expected)\n elif method in ["mean", "std", "var"]:\n if is_integer_dtype(data) or is_bool_dtype(data):\n data = data.astype("Float64")\n if method == "mean":\n tm.assert_extension_array_equal(result, data)\n else:\n tm.assert_extension_array_equal(result, data - data)\n\n @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])\n def test_reductions_2d_axis1(self, data, method):\n arr2d = data.reshape(1, -1)\n\n try:\n result = getattr(arr2d, method)(axis=1)\n except Exception as err:\n try:\n getattr(data, method)()\n except Exception as err2:\n assert type(err) == type(err2)\n return\n else:\n raise AssertionError("Both reductions should raise or neither")\n\n # not necessarily type/dtype-preserving, so weaker assertions\n assert result.shape == (1,)\n expected_scalar = getattr(data, method)()\n res = result[0]\n assert is_matching_na(res, expected_scalar) or res == expected_scalar\n\n\nclass NDArrayBacked2DTests(Dim2CompatTests):\n # More specific tests for NDArrayBackedExtensionArray subclasses\n\n def test_copy_order(self, data):\n # We should be matching numpy semantics for the "order" keyword in 'copy'\n arr2d = data.repeat(2).reshape(-1, 2)\n assert arr2d._ndarray.flags["C_CONTIGUOUS"]\n\n res = arr2d.copy()\n assert res._ndarray.flags["C_CONTIGUOUS"]\n\n res = arr2d[::2, ::2].copy()\n assert res._ndarray.flags["C_CONTIGUOUS"]\n\n res = 
arr2d.copy("F")\n assert not res._ndarray.flags["C_CONTIGUOUS"]\n assert res._ndarray.flags["F_CONTIGUOUS"]\n\n res = arr2d.copy("K")\n assert res._ndarray.flags["C_CONTIGUOUS"]\n\n res = arr2d.T.copy("K")\n assert not res._ndarray.flags["C_CONTIGUOUS"]\n assert res._ndarray.flags["F_CONTIGUOUS"]\n\n # order not accepted by numpy\n msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)"\n with pytest.raises(ValueError, match=msg):\n arr2d.copy("Q")\n\n # neither contiguity\n arr_nc = arr2d[::2]\n assert not arr_nc._ndarray.flags["C_CONTIGUOUS"]\n assert not arr_nc._ndarray.flags["F_CONTIGUOUS"]\n\n assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"]\n assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"]\n\n assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"]\n assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"]\n\n assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"]\n assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"]\n\n assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"]\n assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"]\n
.venv\Lib\site-packages\pandas\tests\extension\base\dim2.py
dim2.py
Python
11,992
0.95
0.13913
0.097378
react-lib
362
2024-12-02T19:06:16.946996
Apache-2.0
true
b0a423ecf0538046ae4e01dc27eeb270
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import (\n infer_dtype,\n is_object_dtype,\n is_string_dtype,\n)\n\n\nclass BaseDtypeTests:\n """Base class for ExtensionDtype classes"""\n\n def test_name(self, dtype):\n assert isinstance(dtype.name, str)\n\n def test_kind(self, dtype):\n valid = set("biufcmMOSUV")\n assert dtype.kind in valid\n\n def test_is_dtype_from_name(self, dtype):\n result = type(dtype).is_dtype(dtype.name)\n assert result is True\n\n def test_is_dtype_unboxes_dtype(self, data, dtype):\n assert dtype.is_dtype(data) is True\n\n def test_is_dtype_from_self(self, dtype):\n result = type(dtype).is_dtype(dtype)\n assert result is True\n\n def test_is_dtype_other_input(self, dtype):\n assert dtype.is_dtype([1, 2, 3]) is False\n\n def test_is_not_string_type(self, dtype):\n assert not is_string_dtype(dtype)\n\n def test_is_not_object_type(self, dtype):\n assert not is_object_dtype(dtype)\n\n def test_eq_with_str(self, dtype):\n assert dtype == dtype.name\n assert dtype != dtype.name + "-suffix"\n\n def test_eq_with_numpy_object(self, dtype):\n assert dtype != np.dtype("object")\n\n def test_eq_with_self(self, dtype):\n assert dtype == dtype\n assert dtype != object()\n\n def test_array_type(self, data, dtype):\n assert dtype.construct_array_type() is type(data)\n\n def test_check_dtype(self, data):\n dtype = data.dtype\n\n # check equivalency for using .dtypes\n df = pd.DataFrame(\n {\n "A": pd.Series(data, dtype=dtype),\n "B": data,\n "C": pd.Series(["foo"] * len(data), dtype=object),\n "D": 1,\n }\n )\n result = df.dtypes == str(dtype)\n assert np.dtype("int64") != "Int64"\n\n expected = pd.Series([True, True, False, False], index=list("ABCD"))\n\n tm.assert_series_equal(result, expected)\n\n expected = pd.Series([True, True, False, False], index=list("ABCD"))\n result = df.dtypes.apply(str) == str(dtype)\n tm.assert_series_equal(result, expected)\n\n def test_hashable(self, dtype):\n 
hash(dtype) # no error\n\n def test_str(self, dtype):\n assert str(dtype) == dtype.name\n\n def test_eq(self, dtype):\n assert dtype == dtype.name\n assert dtype != "anonther_type"\n\n def test_construct_from_string_own_name(self, dtype):\n result = dtype.construct_from_string(dtype.name)\n assert type(result) is type(dtype)\n\n # check OK as classmethod\n result = type(dtype).construct_from_string(dtype.name)\n assert type(result) is type(dtype)\n\n def test_construct_from_string_another_type_raises(self, dtype):\n msg = f"Cannot construct a '{type(dtype).__name__}' from 'another_type'"\n with pytest.raises(TypeError, match=msg):\n type(dtype).construct_from_string("another_type")\n\n def test_construct_from_string_wrong_type_raises(self, dtype):\n with pytest.raises(\n TypeError,\n match="'construct_from_string' expects a string, got <class 'int'>",\n ):\n type(dtype).construct_from_string(0)\n\n def test_get_common_dtype(self, dtype):\n # in practice we will not typically call this with a 1-length list\n # (we shortcut to just use that dtype as the common dtype), but\n # still testing as good practice to have this working (and it is the\n # only case we can test in general)\n assert dtype._get_common_dtype([dtype]) == dtype\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_infer_dtype(self, data, data_missing, skipna):\n # only testing that this works without raising an error\n res = infer_dtype(data, skipna=skipna)\n assert isinstance(res, str)\n res = infer_dtype(data_missing, skipna=skipna)\n assert isinstance(res, str)\n
.venv\Lib\site-packages\pandas\tests\extension\base\dtype.py
dtype.py
Python
4,006
0.95
0.211382
0.074468
react-lib
716
2025-04-05T22:40:50.173833
GPL-3.0
true
6c0f9f7039de1dac51a7824a28a529f2
import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm


class BaseGetitemTests:
    """Tests for ExtensionArray.__getitem__."""

    # NOTE(review): these tests rely on the conformance-suite fixtures
    # (`data`, `data_missing`, `na_value`, `na_cmp`) supplied by subclasses;
    # `data` is presumably length >= 7 with distinct leading elements —
    # confirm against the suite's fixture contract.

    def test_iloc_series(self, data):
        # positional slice and equivalent positional list must agree
        ser = pd.Series(data)
        result = ser.iloc[:4]
        expected = pd.Series(data[:4])
        tm.assert_series_equal(result, expected)

        result = ser.iloc[[0, 1, 2, 3]]
        tm.assert_series_equal(result, expected)

    def test_iloc_frame(self, data):
        df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
        expected = pd.DataFrame({"A": data[:4]})

        # slice -> frame
        result = df.iloc[:4, [0]]
        tm.assert_frame_equal(result, expected)

        # sequence -> frame
        result = df.iloc[[0, 1, 2, 3], [0]]
        tm.assert_frame_equal(result, expected)

        expected = pd.Series(data[:4], name="A")

        # slice -> series
        result = df.iloc[:4, 0]
        tm.assert_series_equal(result, expected)

        # sequence -> series
        result = df.iloc[:4, 0]
        tm.assert_series_equal(result, expected)

        # GH#32959 slice columns with step
        result = df.iloc[:, ::2]
        tm.assert_frame_equal(result, df[["A"]])
        result = df[["B", "A"]].iloc[:, ::2]
        tm.assert_frame_equal(result, df[["B"]])

    def test_iloc_frame_single_block(self, data):
        # GH#32959 null slice along index, slice along columns with single-block
        df = pd.DataFrame({"A": data})

        result = df.iloc[:, :]
        tm.assert_frame_equal(result, df)

        result = df.iloc[:, :1]
        tm.assert_frame_equal(result, df)

        # out-of-range column stop is clipped, not an error
        result = df.iloc[:, :2]
        tm.assert_frame_equal(result, df)

        result = df.iloc[:, ::2]
        tm.assert_frame_equal(result, df)

        # empty column selection
        result = df.iloc[:, 1:2]
        tm.assert_frame_equal(result, df.iloc[:, :0])

        result = df.iloc[:, -1:]
        tm.assert_frame_equal(result, df)

    def test_loc_series(self, data):
        # label-based slicing is endpoint-inclusive: labels 0..3 -> 4 rows
        ser = pd.Series(data)
        result = ser.loc[:3]
        expected = pd.Series(data[:4])
        tm.assert_series_equal(result, expected)

        result = ser.loc[[0, 1, 2, 3]]
        tm.assert_series_equal(result, expected)

    def test_loc_frame(self, data):
        df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
        expected = pd.DataFrame({"A": data[:4]})

        # slice -> frame
        result = df.loc[:3, ["A"]]
        tm.assert_frame_equal(result, expected)

        # sequence -> frame
        result = df.loc[[0, 1, 2, 3], ["A"]]
        tm.assert_frame_equal(result, expected)

        expected = pd.Series(data[:4], name="A")

        # slice -> series
        result = df.loc[:3, "A"]
        tm.assert_series_equal(result, expected)

        # sequence -> series
        result = df.loc[:3, "A"]
        tm.assert_series_equal(result, expected)

    def test_loc_iloc_frame_single_dtype(self, data):
        # GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly
        # return a scalar
        df = pd.DataFrame({"A": data})
        expected = pd.Series([data[2]], index=["A"], name=2, dtype=data.dtype)

        result = df.loc[2]
        tm.assert_series_equal(result, expected)

        expected = pd.Series(
            [data[-1]], index=["A"], name=len(data) - 1, dtype=data.dtype
        )
        result = df.iloc[-1]
        tm.assert_series_equal(result, expected)

    def test_getitem_scalar(self, data):
        # scalar indexing returns an instance of the dtype's scalar type,
        # both on the raw array and when boxed in a Series
        result = data[0]
        assert isinstance(result, data.dtype.type)

        result = pd.Series(data)[0]
        assert isinstance(result, data.dtype.type)

    def test_getitem_invalid(self, data):
        # TODO: box over scalar, [scalar], (scalar,)?

        msg = (
            r"only integers, slices \(`:`\), ellipsis \(`...`\), numpy.newaxis "
            r"\(`None`\) and integer or boolean arrays are valid indices"
        )
        with pytest.raises(IndexError, match=msg):
            data["foo"]
        with pytest.raises(IndexError, match=msg):
            data[2.5]

        # out-of-bounds messages differ per backing store; accept any of them
        ub = len(data)
        msg = "|".join(
            [
                "list index out of range",  # json
                "index out of bounds",  # pyarrow
                "Out of bounds access",  # Sparse
                f"loc must be an integer between -{ub} and {ub}",  # Sparse
                f"index {ub+1} is out of bounds for axis 0 with size {ub}",
                f"index -{ub+1} is out of bounds for axis 0 with size {ub}",
            ]
        )
        with pytest.raises(IndexError, match=msg):
            data[ub + 1]
        with pytest.raises(IndexError, match=msg):
            data[-ub - 1]

    def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
        # position 0 of data_missing is NA by fixture convention
        result = data_missing[0]
        assert na_cmp(result, na_value)

    def test_getitem_empty(self, data):
        # Indexing with empty list
        result = data[[]]
        assert len(result) == 0
        assert isinstance(result, type(data))

        # empty list and empty integer ndarray must behave identically
        expected = data[np.array([], dtype="int64")]
        tm.assert_extension_array_equal(result, expected)

    def test_getitem_mask(self, data):
        # Empty mask, raw array
        mask = np.zeros(len(data), dtype=bool)
        result = data[mask]
        assert len(result) == 0
        assert isinstance(result, type(data))

        # Empty mask, in series
        mask = np.zeros(len(data), dtype=bool)
        result = pd.Series(data)[mask]
        assert len(result) == 0
        assert result.dtype == data.dtype

        # non-empty mask, raw array
        mask[0] = True
        result = data[mask]
        assert len(result) == 1
        assert isinstance(result, type(data))

        # non-empty mask, in series
        result = pd.Series(data)[mask]
        assert len(result) == 1
        assert result.dtype == data.dtype

    def test_getitem_mask_raises(self, data):
        # boolean mask of the wrong length raises, for both numpy bool
        # and nullable-boolean masks
        mask = np.array([True, False])
        msg = f"Boolean index has wrong length: 2 instead of {len(data)}"
        with pytest.raises(IndexError, match=msg):
            data[mask]

        mask = pd.array(mask, dtype="boolean")
        with pytest.raises(IndexError, match=msg):
            data[mask]

    def test_getitem_boolean_array_mask(self, data):
        # masking with a nullable-boolean BooleanArray (no NAs here)
        mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
        result = data[mask]
        assert len(result) == 0
        assert isinstance(result, type(data))

        result = pd.Series(data)[mask]
        assert len(result) == 0
        assert result.dtype == data.dtype

        mask[:5] = True
        expected = data.take([0, 1, 2, 3, 4])
        result = data[mask]
        tm.assert_extension_array_equal(result, expected)

        expected = pd.Series(expected)
        result = pd.Series(data)[mask]
        tm.assert_series_equal(result, expected)

    def test_getitem_boolean_na_treated_as_false(self, data):
        # https://github.com/pandas-dev/pandas/issues/31503
        # NA entries in a boolean mask select nothing (same as False)
        mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
        mask[:2] = pd.NA
        mask[2:4] = True

        result = data[mask]
        expected = data[mask.fillna(False)]

        tm.assert_extension_array_equal(result, expected)

        s = pd.Series(data)

        result = s[mask]
        expected = s[mask.fillna(False)]

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "idx",
        [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
        ids=["list", "integer-array", "numpy-array"],
    )
    def test_getitem_integer_array(self, data, idx):
        # integer indexers of every flavor are equivalent to .take
        result = data[idx]
        assert len(result) == 3
        assert isinstance(result, type(data))
        expected = data.take([0, 1, 2])
        tm.assert_extension_array_equal(result, expected)

        expected = pd.Series(expected)
        result = pd.Series(data)[idx]
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "idx",
        [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
        ids=["list", "integer-array"],
    )
    def test_getitem_integer_with_missing_raises(self, data, idx):
        msg = "Cannot index with an integer indexer containing NA values"
        with pytest.raises(ValueError, match=msg):
            data[idx]

    @pytest.mark.xfail(
        reason="Tries label-based and raises KeyError; "
        "in some cases raises when calling np.asarray"
    )
    @pytest.mark.parametrize(
        "idx",
        [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
        ids=["list", "integer-array"],
    )
    def test_getitem_series_integer_with_missing_raises(self, data, idx):
        msg = "Cannot index with an integer indexer containing NA values"
        # TODO: this raises KeyError about labels not found (it tries label-based)

        ser = pd.Series(data, index=[chr(100 + i) for i in range(len(data))])
        with pytest.raises(ValueError, match=msg):
            ser[idx]

    def test_getitem_slice(self, data):
        # getitem[slice] should return an array
        result = data[slice(0)]  # empty
        assert isinstance(result, type(data))

        result = data[slice(1)]  # scalar
        assert isinstance(result, type(data))

    def test_getitem_ellipsis_and_slice(self, data):
        # GH#40353 this is called from slice_block_rows
        result = data[..., :]
        tm.assert_extension_array_equal(result, data)

        result = data[:, ...]
        tm.assert_extension_array_equal(result, data)

        result = data[..., :3]
        tm.assert_extension_array_equal(result, data[:3])

        result = data[:3, ...]
        tm.assert_extension_array_equal(result, data[:3])

        result = data[..., ::2]
        tm.assert_extension_array_equal(result, data[::2])

        result = data[::2, ...]
        tm.assert_extension_array_equal(result, data[::2])

    def test_get(self, data):
        # GH 20882
        # Series.get: label-based on a non-default integer index
        s = pd.Series(data, index=[2 * i for i in range(len(data))])
        assert s.get(4) == s.iloc[2]

        result = s.get([4, 6])
        expected = s.iloc[[2, 3]]
        tm.assert_series_equal(result, expected)

        result = s.get(slice(2))
        expected = s.iloc[[0, 1]]
        tm.assert_series_equal(result, expected)

        # missing labels return None, never raise
        assert s.get(-1) is None
        assert s.get(s.index.max() + 1) is None

        s = pd.Series(data[:6], index=list("abcdef"))
        assert s.get("c") == s.iloc[2]

        result = s.get(slice("b", "d"))
        expected = s.iloc[[1, 2, 3]]
        tm.assert_series_equal(result, expected)

        result = s.get("Z")
        assert result is None

        # integer keys on a string index fall back to positional — deprecated
        msg = "Series.__getitem__ treating keys as positions is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert s.get(4) == s.iloc[4]
            assert s.get(-1) == s.iloc[-1]
            assert s.get(len(s)) is None

        # GH 21257
        s = pd.Series(data)
        with tm.assert_produces_warning(None):
            # GH#45324 make sure we aren't giving a spurious FutureWarning
            s2 = s[::2]
            assert s2.get(1) is None

    def test_take_sequence(self, data):
        result = pd.Series(data)[[0, 1, 3]]
        assert result.iloc[0] == data[0]
        assert result.iloc[1] == data[1]
        assert result.iloc[2] == data[3]

    def test_take(self, data, na_value, na_cmp):
        # without allow_fill, -1 wraps to the last element;
        # with allow_fill=True, -1 means "insert fill_value"
        result = data.take([0, -1])
        assert result.dtype == data.dtype
        assert result[0] == data[0]
        assert result[1] == data[-1]

        result = data.take([0, -1], allow_fill=True, fill_value=na_value)
        assert result[0] == data[0]
        assert na_cmp(result[1], na_value)

        with pytest.raises(IndexError, match="out of bounds"):
            data.take([len(data) + 1])

    def test_take_empty(self, data, na_value, na_cmp):
        empty = data[:0]

        # -1 with allow_fill=True is valid even on an empty array
        result = empty.take([-1], allow_fill=True)
        assert na_cmp(result[0], na_value)

        msg = "cannot do a non-empty take from an empty axes|out of bounds"

        with pytest.raises(IndexError, match=msg):
            empty.take([-1])

        with pytest.raises(IndexError, match="cannot do a non-empty take"):
            empty.take([0, 1])

    def test_take_negative(self, data):
        # https://github.com/pandas-dev/pandas/issues/20640
        n = len(data)
        result = data.take([0, -n, n - 1, -1])
        expected = data.take([0, 0, n - 1, n - 1])
        tm.assert_extension_array_equal(result, expected)

    def test_take_non_na_fill_value(self, data_missing):
        fill_value = data_missing[1]  # valid
        na = data_missing[0]

        arr = data_missing._from_sequence(
            [na, fill_value, na], dtype=data_missing.dtype
        )
        result = arr.take([-1, 1], fill_value=fill_value, allow_fill=True)
        expected = arr.take([1, 1])
        tm.assert_extension_array_equal(result, expected)

    def test_take_pandas_style_negative_raises(self, data, na_value):
        # negative indices other than -1 are invalid when allow_fill=True
        with pytest.raises(ValueError, match=""):
            data.take([0, -2], fill_value=na_value, allow_fill=True)

    @pytest.mark.parametrize("allow_fill", [True, False])
    def test_take_out_of_bounds_raises(self, data, allow_fill):
        arr = data[:3]

        with pytest.raises(IndexError, match="out of bounds|out-of-bounds"):
            arr.take(np.asarray([0, 3]), allow_fill=allow_fill)

    def test_take_series(self, data):
        s = pd.Series(data)
        result = s.take([0, -1])
        expected = pd.Series(
            data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype),
            index=[0, len(data) - 1],
        )
        tm.assert_series_equal(result, expected)

    def test_reindex(self, data, na_value):
        s = pd.Series(data)
        result = s.reindex([0, 1, 3])
        expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
        tm.assert_series_equal(result, expected)

        # labels not present in the index are filled with na_value
        n = len(data)
        result = s.reindex([-1, 0, n])
        expected = pd.Series(
            data._from_sequence([na_value, data[0], na_value], dtype=s.dtype),
            index=[-1, 0, n],
        )
        tm.assert_series_equal(result, expected)

        result = s.reindex([n, n + 1])
        expected = pd.Series(
            data._from_sequence([na_value, na_value], dtype=s.dtype), index=[n, n + 1]
        )
        tm.assert_series_equal(result, expected)

    def test_reindex_non_na_fill_value(self, data_missing):
        valid = data_missing[1]
        na = data_missing[0]

        arr = data_missing._from_sequence([na, valid], dtype=data_missing.dtype)
        ser = pd.Series(arr)
        result = ser.reindex([0, 1, 2], fill_value=valid)
        expected = pd.Series(
            data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype)
        )

        tm.assert_series_equal(result, expected)

    def test_loc_len1(self, data):
        # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim
        df = pd.DataFrame({"A": data})
        res = df.loc[[0], "A"]
        assert res.ndim == 1
        assert res._mgr.arrays[0].ndim == 1
        if hasattr(res._mgr, "blocks"):
            assert res._mgr._block.ndim == 1

    def test_item(self, data):
        # https://github.com/pandas-dev/pandas/pull/30175
        s = pd.Series(data)
        result = s[:1].item()
        assert result == data[0]

        # .item() on anything but a length-1 Series raises
        msg = "can only convert an array of size 1 to a Python scalar"
        with pytest.raises(ValueError, match=msg):
            s[:0].item()

        with pytest.raises(ValueError, match=msg):
            s.item()
.venv\Lib\site-packages\pandas\tests\extension\base\getitem.py
getitem.py
Python
15,673
0.95
0.083156
0.076294
node-utils
976
2024-06-17T23:16:02.483015
GPL-3.0
true
ca71227b8cda0c800371b1e56f8a6a20
import re

import pytest

from pandas.core.dtypes.common import (
    is_bool_dtype,
    is_numeric_dtype,
    is_object_dtype,
    is_string_dtype,
)

import pandas as pd
import pandas._testing as tm


@pytest.mark.filterwarnings(
    "ignore:The default of observed=False is deprecated:FutureWarning"
)
class BaseGroupbyTests:
    """Groupby-specific tests."""

    def test_grouping_grouper(self, data_for_grouping):
        # Grouping by an EA column must keep the EA as the grouping vector.
        df = pd.DataFrame(
            {
                "A": pd.Series(
                    ["B", "B", None, None, "A", "A", "B", "C"], dtype=object
                ),
                "B": data_for_grouping,
            }
        )
        gr1 = df.groupby("A")._grouper.groupings[0]
        gr2 = df.groupby("B")._grouper.groupings[0]

        tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
        tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)

    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})

        is_bool = data_for_grouping.dtype._is_boolean
        if is_bool:
            # only 2 unique values, and the final entry has c==b
            # (see data_for_grouping docstring)
            df = df.iloc[:-1]

        result = df.groupby("B", as_index=as_index).A.mean()
        _, uniques = pd.factorize(data_for_grouping, sort=True)

        exp_vals = [3.0, 1.0, 4.0]
        if is_bool:
            exp_vals = exp_vals[:-1]
        if as_index:
            index = pd.Index(uniques, name="B")
            expected = pd.Series(exp_vals, index=index, name="A")
            tm.assert_series_equal(result, expected)
        else:
            expected = pd.DataFrame({"B": uniques, "A": exp_vals})
            tm.assert_frame_equal(result, expected)

    def test_groupby_agg_extension(self, data_for_grouping):
        # GH#38980 groupby agg on extension type fails for non-numeric types
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})

        expected = df.iloc[[0, 2, 4, 7]]
        expected = expected.set_index("A")

        # agg-dict, agg-string, and the named method must all agree
        result = df.groupby("A").agg({"B": "first"})
        tm.assert_frame_equal(result, expected)

        result = df.groupby("A").agg("first")
        tm.assert_frame_equal(result, expected)

        result = df.groupby("A").first()
        tm.assert_frame_equal(result, expected)

    def test_groupby_extension_no_sort(self, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})

        is_bool = data_for_grouping.dtype._is_boolean
        if is_bool:
            # only 2 unique values, and the final entry has c==b
            # (see data_for_grouping docstring)
            df = df.iloc[:-1]

        # sort=False preserves order-of-appearance of the group keys
        result = df.groupby("B", sort=False).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=False)

        index = pd.Index(index, name="B")
        exp_vals = [1.0, 3.0, 4.0]
        if is_bool:
            exp_vals = exp_vals[:-1]
        expected = pd.Series(exp_vals, index=index, name="A")
        tm.assert_series_equal(result, expected)

    def test_groupby_extension_transform(self, data_for_grouping):
        # was assigned twice in the original; once is enough
        is_bool = data_for_grouping.dtype._is_boolean

        valid = data_for_grouping[~data_for_grouping.isna()]
        df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
        if is_bool:
            # only 2 unique values, and the final entry has c==b
            # (see data_for_grouping docstring)
            df = df.iloc[:-1]

        result = df.groupby("B").A.transform(len)
        expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
        if is_bool:
            expected = expected[:-1]

        tm.assert_series_equal(result, expected)

    def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df.groupby("B", group_keys=False, observed=False).apply(groupby_apply_op)
        df.groupby("B", group_keys=False, observed=False).A.apply(groupby_apply_op)
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df.groupby("A", group_keys=False, observed=False).apply(groupby_apply_op)
        df.groupby("A", group_keys=False, observed=False).B.apply(groupby_apply_op)

    def test_groupby_apply_identity(self, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = df.groupby("A").B.apply(lambda x: x.array)
        expected = pd.Series(
            [
                df.B.iloc[[0, 1, 6]].array,
                df.B.iloc[[2, 3]].array,
                df.B.iloc[[4, 5]].array,
                df.B.iloc[[7]].array,
            ],
            index=pd.Index([1, 2, 3, 4], name="A"),
            name="B",
        )
        tm.assert_series_equal(result, expected)

    def test_in_numeric_groupby(self, data_for_grouping):
        df = pd.DataFrame(
            {
                "A": [1, 1, 2, 2, 3, 3, 1, 4],
                "B": data_for_grouping,
                "C": [1, 1, 1, 1, 1, 1, 1, 1],
            }
        )

        dtype = data_for_grouping.dtype
        if (
            is_numeric_dtype(dtype)
            or is_bool_dtype(dtype)
            or dtype.name == "decimal"
            or is_string_dtype(dtype)
            or is_object_dtype(dtype)
            or dtype.kind == "m"  # in particular duration[*][pyarrow]
        ):
            expected = pd.Index(["B", "C"])
            result = df.groupby("A").sum().columns
        else:
            expected = pd.Index(["C"])

            msg = "|".join(
                [
                    # period/datetime
                    "does not support sum operations",
                    # all others
                    re.escape(f"agg function failed [how->sum,dtype->{dtype}"),
                ]
            )
            with pytest.raises(TypeError, match=msg):
                df.groupby("A").sum()
            result = df.groupby("A").sum(numeric_only=True).columns
        tm.assert_index_equal(result, expected)
.venv\Lib\site-packages\pandas\tests\extension\base\groupby.py
groupby.py
Python
6,455
0.95
0.109195
0.062069
react-lib
133
2024-01-07T22:01:38.468760
MIT
true
bdf0cf951264ad738c7f330fd6c6dce7
"""\nTests for Indexes backed by arbitrary ExtensionArrays.\n"""\nimport pandas as pd\n\n\nclass BaseIndexTests:\n """Tests for Index object backed by an ExtensionArray"""\n\n def test_index_from_array(self, data):\n idx = pd.Index(data)\n assert data.dtype == idx.dtype\n\n def test_index_from_listlike_with_dtype(self, data):\n idx = pd.Index(data, dtype=data.dtype)\n assert idx.dtype == data.dtype\n\n idx = pd.Index(list(data), dtype=data.dtype)\n assert idx.dtype == data.dtype\n
.venv\Lib\site-packages\pandas\tests\extension\base\index.py
index.py
Python
517
0.85
0.263158
0
awesome-app
154
2024-03-14T22:05:24.154263
BSD-3-Clause
true
910e1ad1a22c45426a2faec5d584be2a
import warnings

import numpy as np
import pytest

from pandas.compat.numpy import np_version_gt2

from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype

import pandas as pd
import pandas._testing as tm


class BaseInterfaceTests:
    """Tests that the basic interface is satisfied."""

    # ------------------------------------------------------------------------
    # Interface
    # ------------------------------------------------------------------------

    # NOTE(review): test_len/test_size hard-code 100 — the `data` fixture is
    # presumably required to have exactly 100 elements; confirm against the
    # suite's fixture contract.

    def test_len(self, data):
        assert len(data) == 100

    def test_size(self, data):
        assert data.size == 100

    def test_ndim(self, data):
        # extension arrays are always 1-dimensional
        assert data.ndim == 1

    def test_can_hold_na_valid(self, data):
        # GH-20761
        assert data._can_hold_na is True

    def test_contains(self, data, data_missing):
        # GH-37867
        # Tests for membership checks. Membership checks for nan-likes is tricky and
        # the settled on rule is: `nan_like in arr` is True if nan_like is
        # arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.

        na_value = data.dtype.na_value
        # ensure data without missing values
        data = data[~data.isna()]

        # first elements are non-missing
        assert data[0] in data
        assert data_missing[0] in data_missing

        # check the presence of na_value
        assert na_value in data_missing
        assert na_value not in data

        # the data can never contain other nan-likes than na_value
        for na_value_obj in tm.NULL_OBJECTS:
            if na_value_obj is na_value or type(na_value_obj) == type(na_value):
                # type check for e.g. two instances of Decimal("NAN")
                continue
            assert na_value_obj not in data
            assert na_value_obj not in data_missing

    def test_memory_usage(self, data):
        # memory_usage without the index must equal the array's nbytes
        s = pd.Series(data)
        result = s.memory_usage(index=False)
        assert result == s.nbytes

    def test_array_interface(self, data):
        # np.array(data) must round-trip element 0, and converting to
        # object dtype must match an elementwise list conversion
        result = np.array(data)
        assert result[0] == data[0]

        result = np.array(data, dtype=object)
        expected = np.array(list(data), dtype=object)
        if expected.ndim > 1:
            # nested data, explicitly construct as 1D
            expected = construct_1d_object_array_from_listlike(list(data))
        tm.assert_numpy_array_equal(result, expected)

    def test_array_interface_copy(self, data):
        # copy=True must always produce independent buffers
        result_copy1 = np.array(data, copy=True)
        result_copy2 = np.array(data, copy=True)
        assert not np.may_share_memory(result_copy1, result_copy2)

        if not np_version_gt2:
            # copy=False semantics are only supported in NumPy>=2.
            return

        # Under NumPy >= 2, copy=False either succeeds zero-copy or the EA
        # emits the transition FutureWarning; accept both, but if no warning
        # was raised the two conversions must share memory.
        warning_raised = False
        msg = "Starting with NumPy 2.0, the behavior of the 'copy' keyword has changed"
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            result_nocopy1 = np.array(data, copy=False)
            assert len(w) <= 1
            if len(w):
                warning_raised = True
                assert msg in str(w[0].message)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            result_nocopy2 = np.array(data, copy=False)
            assert len(w) <= 1
            if len(w):
                warning_raised = True
                assert msg in str(w[0].message)

        if not warning_raised:
            # If copy=False was given and did not raise, these must share the same data
            assert np.may_share_memory(result_nocopy1, result_nocopy2)

    def test_is_extension_array_dtype(self, data):
        assert is_extension_array_dtype(data)
        assert is_extension_array_dtype(data.dtype)
        assert is_extension_array_dtype(pd.Series(data))
        assert isinstance(data.dtype, ExtensionDtype)

    def test_no_values_attribute(self, data):
        # GH-20735: EA's with .values attribute give problems with internal
        # code, disallowing this for now until solved
        assert not hasattr(data, "values")
        assert not hasattr(data, "_values")

    def test_is_numeric_honored(self, data):
        # block-level is_numeric must mirror the dtype's _is_numeric flag
        result = pd.Series(data)
        if hasattr(result._mgr, "blocks"):
            assert result._mgr.blocks[0].is_numeric is data.dtype._is_numeric

    def test_isna_extension_array(self, data_missing):
        # If your `isna` returns an ExtensionArray, you must also implement
        # _reduce. At the *very* least, you must implement any and all
        na = data_missing.isna()
        if is_extension_array_dtype(na):
            # data_missing has exactly one NA, so any() is True and all() False
            assert na._reduce("any")
            assert na.any()

            assert not na._reduce("all")
            assert not na.all()

            assert na.dtype._is_boolean

    def test_copy(self, data):
        # GH#27083 removing deep keyword from EA.copy
        assert data[0] != data[1]
        result = data.copy()

        if data.dtype._is_immutable:
            pytest.skip(f"test_copy assumes mutability and {data.dtype} is immutable")

        # mutating the original must not leak into the copy
        data[1] = data[0]
        assert result[1] != result[0]

    def test_view(self, data):
        # view with no dtype should return a shallow copy, *not* the same
        # object
        assert data[1] != data[0]

        result = data.view()
        assert result is not data
        assert type(result) == type(data)

        if data.dtype._is_immutable:
            pytest.skip(f"test_view assumes mutability and {data.dtype} is immutable")

        # shallow: writing through the view is visible in the original
        result[1] = result[0]
        assert data[1] == data[0]

        # check specifically that the `dtype` kwarg is accepted
        data.view(dtype=None)

    def test_tolist(self, data):
        result = data.tolist()
        expected = list(data)
        assert isinstance(result, list)
        assert result == expected
.venv\Lib\site-packages\pandas\tests\extension\base\interface.py
interface.py
Python
5,999
0.95
0.186047
0.180451
awesome-app
297
2024-09-26T16:57:53.213187
BSD-3-Clause
true
80f80fbe23a83f8b54324e1b7f8e0595
from io import StringIO\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays import ExtensionArray\n\n\nclass BaseParsingTests:\n @pytest.mark.parametrize("engine", ["c", "python"])\n def test_EA_types(self, engine, data, request):\n if isinstance(data.dtype, pd.CategoricalDtype):\n # in parsers.pyx _convert_with_dtype there is special-casing for\n # Categorical that pre-empts _from_sequence_of_strings\n pass\n elif isinstance(data.dtype, pd.core.dtypes.dtypes.NumpyEADtype):\n # These get unwrapped internally so are treated as numpy dtypes\n # in the parsers.pyx code\n pass\n elif (\n type(data)._from_sequence_of_strings.__func__\n is ExtensionArray._from_sequence_of_strings.__func__\n ):\n # i.e. the EA hasn't overridden _from_sequence_of_strings\n mark = pytest.mark.xfail(\n reason="_from_sequence_of_strings not implemented",\n raises=NotImplementedError,\n )\n request.node.add_marker(mark)\n\n df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))})\n csv_output = df.to_csv(index=False, na_rep=np.nan)\n result = pd.read_csv(\n StringIO(csv_output), dtype={"with_dtype": str(data.dtype)}, engine=engine\n )\n expected = df\n tm.assert_frame_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\extension\base\io.py
io.py
Python
1,475
0.95
0.102564
0.147059
node-utils
334
2024-05-15T16:46:51.083424
BSD-3-Clause
true
635ce3103176d0c6141c9a892a6656f7
import inspect\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas._typing import Dtype\n\nfrom pandas.core.dtypes.common import is_bool_dtype\nfrom pandas.core.dtypes.dtypes import NumpyEADtype\nfrom pandas.core.dtypes.missing import na_value_for_dtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.sorting import nargsort\n\n\nclass BaseMethodsTests:\n """Various Series and DataFrame methods."""\n\n def test_hash_pandas_object(self, data):\n # _hash_pandas_object should return a uint64 ndarray of the same length\n # as the data\n from pandas.core.util.hashing import _default_hash_key\n\n res = data._hash_pandas_object(\n encoding="utf-8", hash_key=_default_hash_key, categorize=False\n )\n assert res.dtype == np.uint64\n assert res.shape == data.shape\n\n def test_value_counts_default_dropna(self, data):\n # make sure we have consistent default dropna kwarg\n if not hasattr(data, "value_counts"):\n pytest.skip(f"value_counts is not implemented for {type(data)}")\n sig = inspect.signature(data.value_counts)\n kwarg = sig.parameters["dropna"]\n assert kwarg.default is True\n\n @pytest.mark.parametrize("dropna", [True, False])\n def test_value_counts(self, all_data, dropna):\n all_data = all_data[:10]\n if dropna:\n other = all_data[~all_data.isna()]\n else:\n other = all_data\n\n result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()\n expected = pd.Series(other).value_counts(dropna=dropna).sort_index()\n\n tm.assert_series_equal(result, expected)\n\n def test_value_counts_with_normalize(self, data):\n # GH 33172\n data = data[:10].unique()\n values = np.array(data[~data.isna()])\n ser = pd.Series(data, dtype=data.dtype)\n\n result = ser.value_counts(normalize=True).sort_index()\n\n if not isinstance(data, pd.Categorical):\n expected = pd.Series(\n [1 / len(values)] * len(values), index=result.index, name="proportion"\n )\n else:\n expected = pd.Series(0.0, index=result.index, name="proportion")\n 
expected[result > 0] = 1 / len(values)\n\n if isinstance(data.dtype, pd.StringDtype) and data.dtype.na_value is np.nan:\n # TODO: avoid special-casing\n expected = expected.astype("float64")\n elif getattr(data.dtype, "storage", "") == "pyarrow" or isinstance(\n data.dtype, pd.ArrowDtype\n ):\n # TODO: avoid special-casing\n expected = expected.astype("double[pyarrow]")\n elif na_value_for_dtype(data.dtype) is pd.NA:\n # TODO(GH#44692): avoid special-casing\n expected = expected.astype("Float64")\n\n tm.assert_series_equal(result, expected)\n\n def test_count(self, data_missing):\n df = pd.DataFrame({"A": data_missing})\n result = df.count(axis="columns")\n expected = pd.Series([0, 1])\n tm.assert_series_equal(result, expected)\n\n def test_series_count(self, data_missing):\n # GH#26835\n ser = pd.Series(data_missing)\n result = ser.count()\n expected = 1\n assert result == expected\n\n def test_apply_simple_series(self, data):\n result = pd.Series(data).apply(id)\n assert isinstance(result, pd.Series)\n\n @pytest.mark.parametrize("na_action", [None, "ignore"])\n def test_map(self, data_missing, na_action):\n result = data_missing.map(lambda x: x, na_action=na_action)\n expected = data_missing.to_numpy()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_argsort(self, data_for_sorting):\n result = pd.Series(data_for_sorting).argsort()\n # argsort result gets passed to take, so should be np.intp\n expected = pd.Series(np.array([2, 0, 1], dtype=np.intp))\n tm.assert_series_equal(result, expected)\n\n def test_argsort_missing_array(self, data_missing_for_sorting):\n result = data_missing_for_sorting.argsort()\n # argsort result gets passed to take, so should be np.intp\n expected = np.array([2, 0, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_argsort_missing(self, data_missing_for_sorting):\n msg = "The behavior of Series.argsort in the presence of NA values"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n 
result = pd.Series(data_missing_for_sorting).argsort()\n expected = pd.Series(np.array([1, -1, 0], dtype=np.intp))\n tm.assert_series_equal(result, expected)\n\n def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):\n # GH 24382\n is_bool = data_for_sorting.dtype._is_boolean\n\n exp_argmax = 1\n exp_argmax_repeated = 3\n if is_bool:\n # See data_for_sorting docstring\n exp_argmax = 0\n exp_argmax_repeated = 1\n\n # data_for_sorting -> [B, C, A] with A < B < C\n assert data_for_sorting.argmax() == exp_argmax\n assert data_for_sorting.argmin() == 2\n\n # with repeated values -> first occurrence\n data = data_for_sorting.take([2, 0, 0, 1, 1, 2])\n assert data.argmax() == exp_argmax_repeated\n assert data.argmin() == 0\n\n # with missing values\n # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.\n assert data_missing_for_sorting.argmax() == 0\n assert data_missing_for_sorting.argmin() == 2\n\n @pytest.mark.parametrize("method", ["argmax", "argmin"])\n def test_argmin_argmax_empty_array(self, method, data):\n # GH 24382\n err_msg = "attempt to get"\n with pytest.raises(ValueError, match=err_msg):\n getattr(data[:0], method)()\n\n @pytest.mark.parametrize("method", ["argmax", "argmin"])\n def test_argmin_argmax_all_na(self, method, data, na_value):\n # all missing with skipna=True is the same as empty\n err_msg = "attempt to get"\n data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype)\n with pytest.raises(ValueError, match=err_msg):\n getattr(data_na, method)()\n\n @pytest.mark.parametrize(\n "op_name, skipna, expected",\n [\n ("idxmax", True, 0),\n ("idxmin", True, 2),\n ("argmax", True, 0),\n ("argmin", True, 2),\n ("idxmax", False, np.nan),\n ("idxmin", False, np.nan),\n ("argmax", False, -1),\n ("argmin", False, -1),\n ],\n )\n def test_argreduce_series(\n self, data_missing_for_sorting, op_name, skipna, expected\n ):\n # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.\n warn = 
None\n msg = "The behavior of Series.argmax/argmin"\n if op_name.startswith("arg") and expected == -1:\n warn = FutureWarning\n if op_name.startswith("idx") and np.isnan(expected):\n warn = FutureWarning\n msg = f"The behavior of Series.{op_name}"\n ser = pd.Series(data_missing_for_sorting)\n with tm.assert_produces_warning(warn, match=msg):\n result = getattr(ser, op_name)(skipna=skipna)\n tm.assert_almost_equal(result, expected)\n\n def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting):\n # GH#38733\n data = data_missing_for_sorting\n\n with pytest.raises(NotImplementedError, match=""):\n data.argmin(skipna=False)\n\n with pytest.raises(NotImplementedError, match=""):\n data.argmax(skipna=False)\n\n @pytest.mark.parametrize(\n "na_position, expected",\n [\n ("last", np.array([2, 0, 1], dtype=np.dtype("intp"))),\n ("first", np.array([1, 2, 0], dtype=np.dtype("intp"))),\n ],\n )\n def test_nargsort(self, data_missing_for_sorting, na_position, expected):\n # GH 25439\n result = nargsort(data_missing_for_sorting, na_position=na_position)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("ascending", [True, False])\n def test_sort_values(self, data_for_sorting, ascending, sort_by_key):\n ser = pd.Series(data_for_sorting)\n result = ser.sort_values(ascending=ascending, key=sort_by_key)\n expected = ser.iloc[[2, 0, 1]]\n if not ascending:\n # GH 35922. 
Expect stable sort\n if ser.nunique() == 2:\n expected = ser.iloc[[0, 1, 2]]\n else:\n expected = ser.iloc[[1, 0, 2]]\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("ascending", [True, False])\n def test_sort_values_missing(\n self, data_missing_for_sorting, ascending, sort_by_key\n ):\n ser = pd.Series(data_missing_for_sorting)\n result = ser.sort_values(ascending=ascending, key=sort_by_key)\n if ascending:\n expected = ser.iloc[[2, 0, 1]]\n else:\n expected = ser.iloc[[0, 2, 1]]\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("ascending", [True, False])\n def test_sort_values_frame(self, data_for_sorting, ascending):\n df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})\n result = df.sort_values(["A", "B"])\n expected = pd.DataFrame(\n {"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("keep", ["first", "last", False])\n def test_duplicated(self, data, keep):\n arr = data.take([0, 1, 0, 1])\n result = arr.duplicated(keep=keep)\n if keep == "first":\n expected = np.array([False, False, True, True])\n elif keep == "last":\n expected = np.array([True, True, False, False])\n else:\n expected = np.array([True, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("box", [pd.Series, lambda x: x])\n @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])\n def test_unique(self, data, box, method):\n duplicated = box(data._from_sequence([data[0], data[0]], dtype=data.dtype))\n\n result = method(duplicated)\n\n assert len(result) == 1\n assert isinstance(result, type(data))\n assert result[0] == duplicated[0]\n\n def test_factorize(self, data_for_grouping):\n codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)\n\n is_bool = data_for_grouping.dtype._is_boolean\n if is_bool:\n # only 2 unique values\n expected_codes = np.array([0, 0, -1, -1, 1, 1, 
0, 0], dtype=np.intp)\n expected_uniques = data_for_grouping.take([0, 4])\n else:\n expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp)\n expected_uniques = data_for_grouping.take([0, 4, 7])\n\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_extension_array_equal(uniques, expected_uniques)\n\n def test_factorize_equivalence(self, data_for_grouping):\n codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True)\n codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True)\n\n tm.assert_numpy_array_equal(codes_1, codes_2)\n tm.assert_extension_array_equal(uniques_1, uniques_2)\n assert len(uniques_1) == len(pd.unique(uniques_1))\n assert uniques_1.dtype == data_for_grouping.dtype\n\n def test_factorize_empty(self, data):\n codes, uniques = pd.factorize(data[:0])\n expected_codes = np.array([], dtype=np.intp)\n expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)\n\n tm.assert_numpy_array_equal(codes, expected_codes)\n tm.assert_extension_array_equal(uniques, expected_uniques)\n\n def test_fillna_copy_frame(self, data_missing):\n arr = data_missing.take([1, 1])\n df = pd.DataFrame({"A": arr})\n df_orig = df.copy()\n\n filled_val = df.iloc[0, 0]\n result = df.fillna(filled_val)\n\n result.iloc[0, 0] = filled_val\n\n tm.assert_frame_equal(df, df_orig)\n\n def test_fillna_copy_series(self, data_missing):\n arr = data_missing.take([1, 1])\n ser = pd.Series(arr, copy=False)\n ser_orig = ser.copy()\n\n filled_val = ser[0]\n result = ser.fillna(filled_val)\n result.iloc[0] = filled_val\n\n tm.assert_series_equal(ser, ser_orig)\n\n def test_fillna_length_mismatch(self, data_missing):\n msg = "Length of 'value' does not match."\n with pytest.raises(ValueError, match=msg):\n data_missing.fillna(data_missing.take([1]))\n\n # Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool]\n _combine_le_expected_dtype: Dtype = NumpyEADtype("bool")\n\n def test_combine_le(self, 
data_repeated):\n # GH 20825\n # Test that combine works when doing a <= (le) comparison\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 <= x2)\n expected = pd.Series(\n pd.array(\n [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],\n dtype=self._combine_le_expected_dtype,\n )\n )\n tm.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 <= x2)\n expected = pd.Series(\n pd.array(\n [a <= val for a in list(orig_data1)],\n dtype=self._combine_le_expected_dtype,\n )\n )\n tm.assert_series_equal(result, expected)\n\n def test_combine_add(self, data_repeated):\n # GH 20825\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n\n # Check if the operation is supported pointwise for our scalars. If not,\n # we will expect Series.combine to raise as well.\n try:\n with np.errstate(over="ignore"):\n expected = pd.Series(\n orig_data1._from_sequence(\n [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]\n )\n )\n except TypeError:\n # If the operation is not supported pointwise for our scalars,\n # then Series.combine should also raise\n with pytest.raises(TypeError):\n s1.combine(s2, lambda x1, x2: x1 + x2)\n return\n\n result = s1.combine(s2, lambda x1, x2: x1 + x2)\n tm.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 + x2)\n expected = pd.Series(\n orig_data1._from_sequence([a + val for a in list(orig_data1)])\n )\n tm.assert_series_equal(result, expected)\n\n def test_combine_first(self, data):\n # https://github.com/pandas-dev/pandas/issues/24147\n a = pd.Series(data[:3])\n b = pd.Series(data[2:5], index=[2, 3, 4])\n result = a.combine_first(b)\n expected = pd.Series(data[:5])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("frame", [True, False])\n 
@pytest.mark.parametrize(\n "periods, indices",\n [(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])],\n )\n def test_container_shift(self, data, frame, periods, indices):\n # https://github.com/pandas-dev/pandas/issues/22386\n subset = data[:5]\n data = pd.Series(subset, name="A")\n expected = pd.Series(subset.take(indices, allow_fill=True), name="A")\n\n if frame:\n result = data.to_frame(name="A").assign(B=1).shift(periods)\n expected = pd.concat(\n [expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1\n )\n compare = tm.assert_frame_equal\n else:\n result = data.shift(periods)\n compare = tm.assert_series_equal\n\n compare(result, expected)\n\n def test_shift_0_periods(self, data):\n # GH#33856 shifting with periods=0 should return a copy, not same obj\n result = data.shift(0)\n assert data[0] != data[1] # otherwise below is invalid\n data[0] = data[1]\n assert result[0] != result[1] # i.e. not the same object/view\n\n @pytest.mark.parametrize("periods", [1, -2])\n def test_diff(self, data, periods):\n data = data[:5]\n if is_bool_dtype(data.dtype):\n op = operator.xor\n else:\n op = operator.sub\n try:\n # does this array implement ops?\n op(data, data)\n except Exception:\n pytest.skip(f"{type(data)} does not support diff")\n s = pd.Series(data)\n result = s.diff(periods)\n expected = pd.Series(op(data, data.shift(periods)))\n tm.assert_series_equal(result, expected)\n\n df = pd.DataFrame({"A": data, "B": [1.0] * 5})\n result = df.diff(periods)\n if periods == 1:\n b = [np.nan, 0, 0, 0, 0]\n else:\n b = [0, 0, 0, np.nan, np.nan]\n expected = pd.DataFrame({"A": expected, "B": b})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "periods, indices",\n [[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]],\n )\n def test_shift_non_empty_array(self, data, periods, indices):\n # https://github.com/pandas-dev/pandas/issues/23911\n subset = data[:2]\n result = subset.shift(periods)\n expected 
= subset.take(indices, allow_fill=True)\n tm.assert_extension_array_equal(result, expected)\n\n @pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4])\n def test_shift_empty_array(self, data, periods):\n # https://github.com/pandas-dev/pandas/issues/23911\n empty = data[:0]\n result = empty.shift(periods)\n expected = empty\n tm.assert_extension_array_equal(result, expected)\n\n def test_shift_zero_copies(self, data):\n # GH#31502\n result = data.shift(0)\n assert result is not data\n\n result = data[:0].shift(2)\n assert result is not data\n\n def test_shift_fill_value(self, data):\n arr = data[:4]\n fill_value = data[0]\n result = arr.shift(1, fill_value=fill_value)\n expected = data.take([0, 0, 1, 2])\n tm.assert_extension_array_equal(result, expected)\n\n result = arr.shift(-2, fill_value=fill_value)\n expected = data.take([2, 3, 0, 0])\n tm.assert_extension_array_equal(result, expected)\n\n def test_not_hashable(self, data):\n # We are in general mutable, so not hashable\n with pytest.raises(TypeError, match="unhashable type"):\n hash(data)\n\n def test_hash_pandas_object_works(self, data, as_frame):\n # https://github.com/pandas-dev/pandas/issues/23066\n data = pd.Series(data)\n if as_frame:\n data = data.to_frame()\n a = pd.util.hash_pandas_object(data)\n b = pd.util.hash_pandas_object(data)\n tm.assert_equal(a, b)\n\n def test_searchsorted(self, data_for_sorting, as_series):\n if data_for_sorting.dtype._is_boolean:\n return self._test_searchsorted_bool_dtypes(data_for_sorting, as_series)\n\n b, c, a = data_for_sorting\n arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c]\n\n if as_series:\n arr = pd.Series(arr)\n assert arr.searchsorted(a) == 0\n assert arr.searchsorted(a, side="right") == 1\n\n assert arr.searchsorted(b) == 1\n assert arr.searchsorted(b, side="right") == 2\n\n assert arr.searchsorted(c) == 2\n assert arr.searchsorted(c, side="right") == 3\n\n result = arr.searchsorted(arr.take([0, 2]))\n expected = np.array([0, 2], dtype=np.intp)\n\n 
tm.assert_numpy_array_equal(result, expected)\n\n # sorter\n sorter = np.array([1, 2, 0])\n assert data_for_sorting.searchsorted(a, sorter=sorter) == 0\n\n def _test_searchsorted_bool_dtypes(self, data_for_sorting, as_series):\n # We call this from test_searchsorted in cases where we have a\n # boolean-like dtype. The non-bool test assumes we have more than 2\n # unique values.\n dtype = data_for_sorting.dtype\n data_for_sorting = pd.array([True, False], dtype=dtype)\n b, a = data_for_sorting\n arr = type(data_for_sorting)._from_sequence([a, b])\n\n if as_series:\n arr = pd.Series(arr)\n assert arr.searchsorted(a) == 0\n assert arr.searchsorted(a, side="right") == 1\n\n assert arr.searchsorted(b) == 1\n assert arr.searchsorted(b, side="right") == 2\n\n result = arr.searchsorted(arr.take([0, 1]))\n expected = np.array([0, 1], dtype=np.intp)\n\n tm.assert_numpy_array_equal(result, expected)\n\n # sorter\n sorter = np.array([1, 0])\n assert data_for_sorting.searchsorted(a, sorter=sorter) == 0\n\n def test_where_series(self, data, na_value, as_frame):\n assert data[0] != data[1]\n cls = type(data)\n a, b = data[:2]\n\n orig = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))\n ser = orig.copy()\n cond = np.array([True, True, False, False])\n\n if as_frame:\n ser = ser.to_frame(name="a")\n cond = cond.reshape(-1, 1)\n\n result = ser.where(cond)\n expected = pd.Series(\n cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)\n )\n\n if as_frame:\n expected = expected.to_frame(name="a")\n tm.assert_equal(result, expected)\n\n ser.mask(~cond, inplace=True)\n tm.assert_equal(ser, expected)\n\n # array other\n ser = orig.copy()\n if as_frame:\n ser = ser.to_frame(name="a")\n cond = np.array([True, False, True, True])\n other = cls._from_sequence([a, b, a, b], dtype=data.dtype)\n if as_frame:\n other = pd.DataFrame({"a": other})\n cond = pd.DataFrame({"a": cond})\n result = ser.where(cond, other)\n expected = pd.Series(cls._from_sequence([a, b, b, b], 
dtype=data.dtype))\n if as_frame:\n expected = expected.to_frame(name="a")\n tm.assert_equal(result, expected)\n\n ser.mask(~cond, other, inplace=True)\n tm.assert_equal(ser, expected)\n\n @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])\n def test_repeat(self, data, repeats, as_series, use_numpy):\n arr = type(data)._from_sequence(data[:3], dtype=data.dtype)\n if as_series:\n arr = pd.Series(arr)\n\n result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats)\n\n repeats = [repeats] * 3 if isinstance(repeats, int) else repeats\n expected = [x for x, n in zip(arr, repeats) for _ in range(n)]\n expected = type(data)._from_sequence(expected, dtype=data.dtype)\n if as_series:\n expected = pd.Series(expected, index=arr.index.repeat(repeats))\n\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "repeats, kwargs, error, msg",\n [\n (2, {"axis": 1}, ValueError, "axis"),\n (-1, {}, ValueError, "negative"),\n ([1, 2], {}, ValueError, "shape"),\n (2, {"foo": "bar"}, TypeError, "'foo'"),\n ],\n )\n def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):\n with pytest.raises(error, match=msg):\n if use_numpy:\n np.repeat(data, repeats, **kwargs)\n else:\n data.repeat(repeats, **kwargs)\n\n def test_delete(self, data):\n result = data.delete(0)\n expected = data[1:]\n tm.assert_extension_array_equal(result, expected)\n\n result = data.delete([1, 3])\n expected = data._concat_same_type([data[[0]], data[[2]], data[4:]])\n tm.assert_extension_array_equal(result, expected)\n\n def test_insert(self, data):\n # insert at the beginning\n result = data[1:].insert(0, data[0])\n tm.assert_extension_array_equal(result, data)\n\n result = data[1:].insert(-len(data[1:]), data[0])\n tm.assert_extension_array_equal(result, data)\n\n # insert at the middle\n result = data[:-1].insert(4, data[-1])\n\n taker = np.arange(len(data))\n taker[5:] = taker[4:-1]\n taker[4] = len(data) - 1\n expected = data.take(taker)\n 
tm.assert_extension_array_equal(result, expected)\n\n def test_insert_invalid(self, data, invalid_scalar):\n item = invalid_scalar\n\n with pytest.raises((TypeError, ValueError)):\n data.insert(0, item)\n\n with pytest.raises((TypeError, ValueError)):\n data.insert(4, item)\n\n with pytest.raises((TypeError, ValueError)):\n data.insert(len(data) - 1, item)\n\n def test_insert_invalid_loc(self, data):\n ub = len(data)\n\n with pytest.raises(IndexError):\n data.insert(ub + 1, data[0])\n\n with pytest.raises(IndexError):\n data.insert(-ub - 1, data[0])\n\n with pytest.raises(TypeError):\n # we expect TypeError here instead of IndexError to match np.insert\n data.insert(1.5, data[0])\n\n @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])\n def test_equals(self, data, na_value, as_series, box):\n data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype)\n data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype)\n\n data = tm.box_expected(data, box, transpose=False)\n data2 = tm.box_expected(data2, box, transpose=False)\n data_na = tm.box_expected(data_na, box, transpose=False)\n\n # we are asserting with `is True/False` explicitly, to test that the\n # result is an actual Python bool, and not something "truthy"\n\n assert data.equals(data) is True\n assert data.equals(data.copy()) is True\n\n # unequal other data\n assert data.equals(data2) is False\n assert data.equals(data_na) is False\n\n # different length\n assert data[:2].equals(data[:3]) is False\n\n # empty are equal\n assert data[:0].equals(data[:0]) is True\n\n # other types\n assert data.equals(None) is False\n assert data[[0]].equals(data[0]) is False\n\n def test_equals_same_data_different_object(self, data):\n # https://github.com/pandas-dev/pandas/issues/34660\n assert pd.Series(data).equals(pd.Series(data))\n
.venv\Lib\site-packages\pandas\tests\extension\base\methods.py
methods.py
Python
26,742
0.95
0.130556
0.095076
vue-tools
886
2024-02-04T10:04:09.726107
MIT
true
844ace26a7960ab36769f4df950f2c3e
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass BaseMissingTests:\n def test_isna(self, data_missing):\n expected = np.array([True, False])\n\n result = pd.isna(data_missing)\n tm.assert_numpy_array_equal(result, expected)\n\n result = pd.Series(data_missing).isna()\n expected = pd.Series(expected)\n tm.assert_series_equal(result, expected)\n\n # GH 21189\n result = pd.Series(data_missing).drop([0, 1]).isna()\n expected = pd.Series([], dtype=bool)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("na_func", ["isna", "notna"])\n def test_isna_returns_copy(self, data_missing, na_func):\n result = pd.Series(data_missing)\n expected = result.copy()\n mask = getattr(result, na_func)()\n if isinstance(mask.dtype, pd.SparseDtype):\n # TODO: GH 57739\n mask = np.array(mask)\n mask.flags.writeable = True\n\n mask[:] = True\n tm.assert_series_equal(result, expected)\n\n def test_dropna_array(self, data_missing):\n result = data_missing.dropna()\n expected = data_missing[[1]]\n tm.assert_extension_array_equal(result, expected)\n\n def test_dropna_series(self, data_missing):\n ser = pd.Series(data_missing)\n result = ser.dropna()\n expected = ser.iloc[[1]]\n tm.assert_series_equal(result, expected)\n\n def test_dropna_frame(self, data_missing):\n df = pd.DataFrame({"A": data_missing}, columns=pd.Index(["A"], dtype=object))\n\n # defaults\n result = df.dropna()\n expected = df.iloc[[1]]\n tm.assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.dropna(axis="columns")\n expected = pd.DataFrame(index=pd.RangeIndex(2), columns=pd.Index([]))\n tm.assert_frame_equal(result, expected)\n\n # multiple\n df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]})\n result = df.dropna()\n expected = df.iloc[:0]\n tm.assert_frame_equal(result, expected)\n\n def test_fillna_scalar(self, data_missing):\n valid = data_missing[1]\n result = data_missing.fillna(valid)\n expected = data_missing.fillna(valid)\n 
tm.assert_extension_array_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:Series.fillna with 'method' is deprecated:FutureWarning"\n )\n def test_fillna_limit_pad(self, data_missing):\n arr = data_missing.take([1, 0, 0, 0, 1])\n result = pd.Series(arr).ffill(limit=2)\n expected = pd.Series(data_missing.take([1, 1, 1, 0, 1]))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "limit_area, input_ilocs, expected_ilocs",\n [\n ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),\n ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),\n ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),\n ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),\n ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),\n ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),\n ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),\n ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),\n ],\n )\n def test_ffill_limit_area(\n self, data_missing, limit_area, input_ilocs, expected_ilocs\n ):\n # GH#56616\n arr = data_missing.take(input_ilocs)\n result = pd.Series(arr).ffill(limit_area=limit_area)\n expected = pd.Series(data_missing.take(expected_ilocs))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:Series.fillna with 'method' is deprecated:FutureWarning"\n )\n def test_fillna_limit_backfill(self, data_missing):\n arr = data_missing.take([1, 0, 0, 0, 1])\n result = pd.Series(arr).fillna(method="backfill", limit=2)\n expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))\n tm.assert_series_equal(result, expected)\n\n def test_fillna_no_op_returns_copy(self, data):\n data = data[~data.isna()]\n\n valid = data[0]\n result = data.fillna(valid)\n assert result is not data\n tm.assert_extension_array_equal(result, data)\n\n result = data._pad_or_backfill(method="backfill")\n assert result is not data\n tm.assert_extension_array_equal(result, data)\n\n def test_fillna_series(self, data_missing):\n fill_value = data_missing[1]\n ser = pd.Series(data_missing)\n\n result = 
ser.fillna(fill_value)\n expected = pd.Series(\n data_missing._from_sequence(\n [fill_value, fill_value], dtype=data_missing.dtype\n )\n )\n tm.assert_series_equal(result, expected)\n\n # Fill with a series\n result = ser.fillna(expected)\n tm.assert_series_equal(result, expected)\n\n # Fill with a series not affecting the missing values\n result = ser.fillna(ser)\n tm.assert_series_equal(result, ser)\n\n def test_fillna_series_method(self, data_missing, fillna_method):\n fill_value = data_missing[1]\n\n if fillna_method == "ffill":\n data_missing = data_missing[::-1]\n\n result = getattr(pd.Series(data_missing), fillna_method)()\n expected = pd.Series(\n data_missing._from_sequence(\n [fill_value, fill_value], dtype=data_missing.dtype\n )\n )\n\n tm.assert_series_equal(result, expected)\n\n def test_fillna_frame(self, data_missing):\n fill_value = data_missing[1]\n\n result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)\n\n expected = pd.DataFrame(\n {\n "A": data_missing._from_sequence(\n [fill_value, fill_value], dtype=data_missing.dtype\n ),\n "B": [1, 2],\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_fillna_fill_other(self, data):\n result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0})\n\n expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)})\n\n tm.assert_frame_equal(result, expected)\n\n def test_use_inf_as_na_no_effect(self, data_missing):\n ser = pd.Series(data_missing)\n expected = ser.isna()\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with pd.option_context("mode.use_inf_as_na", True):\n result = ser.isna()\n tm.assert_series_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\extension\base\missing.py
missing.py
Python
6,606
0.95
0.094737
0.052288
node-utils
674
2023-11-13T14:19:07.627228
BSD-3-Clause
true
9447adfb7ab03c36d3110334ba0e3c67
from __future__ import annotations\n\nfrom typing import final\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_string_dtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core import ops\n\n\nclass BaseOpsUtil:\n series_scalar_exc: type[Exception] | None = TypeError\n frame_scalar_exc: type[Exception] | None = TypeError\n series_array_exc: type[Exception] | None = TypeError\n divmod_exc: type[Exception] | None = TypeError\n\n def _get_expected_exception(\n self, op_name: str, obj, other\n ) -> type[Exception] | tuple[type[Exception], ...] | None:\n # Find the Exception, if any we expect to raise calling\n # obj.__op_name__(other)\n\n # The self.obj_bar_exc pattern isn't great in part because it can depend\n # on op_name or dtypes, but we use it here for backward-compatibility.\n if op_name in ["__divmod__", "__rdivmod__"]:\n result = self.divmod_exc\n elif isinstance(obj, pd.Series) and isinstance(other, pd.Series):\n result = self.series_array_exc\n elif isinstance(obj, pd.Series):\n result = self.series_scalar_exc\n else:\n result = self.frame_scalar_exc\n\n return result\n\n def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):\n # In _check_op we check that the result of a pointwise operation\n # (found via _combine) matches the result of the vectorized\n # operation obj.__op_name__(other).\n # In some cases pandas dtype inference on the scalar result may not\n # give a matching dtype even if both operations are behaving "correctly".\n # In these cases, do extra required casting here.\n return pointwise_result\n\n def get_op_from_name(self, op_name: str):\n return tm.get_op_from_name(op_name)\n\n # Subclasses are not expected to need to override check_opname, _check_op,\n # _check_divmod_op, or _combine.\n # Ideally any relevant overriding can be done in _cast_pointwise_result,\n # get_op_from_name, and the specification of `exc`. 
If you find a use\n # case that still requires overriding _check_op or _combine, please let\n # us know at github.com/pandas-dev/pandas/issues\n @final\n def check_opname(self, ser: pd.Series, op_name: str, other):\n exc = self._get_expected_exception(op_name, ser, other)\n op = self.get_op_from_name(op_name)\n\n self._check_op(ser, op, other, op_name, exc)\n\n # see comment on check_opname\n @final\n def _combine(self, obj, other, op):\n if isinstance(obj, pd.DataFrame):\n if len(obj.columns) != 1:\n raise NotImplementedError\n expected = obj.iloc[:, 0].combine(other, op).to_frame()\n else:\n expected = obj.combine(other, op)\n return expected\n\n # see comment on check_opname\n @final\n def _check_op(\n self, ser: pd.Series, op, other, op_name: str, exc=NotImplementedError\n ):\n # Check that the Series/DataFrame arithmetic/comparison method matches\n # the pointwise result from _combine.\n\n if exc is None:\n result = op(ser, other)\n expected = self._combine(ser, other, op)\n expected = self._cast_pointwise_result(op_name, ser, other, expected)\n assert isinstance(result, type(ser))\n tm.assert_equal(result, expected)\n else:\n with pytest.raises(exc):\n op(ser, other)\n\n # see comment on check_opname\n @final\n def _check_divmod_op(self, ser: pd.Series, op, other):\n # check that divmod behavior matches behavior of floordiv+mod\n if op is divmod:\n exc = self._get_expected_exception("__divmod__", ser, other)\n else:\n exc = self._get_expected_exception("__rdivmod__", ser, other)\n if exc is None:\n result_div, result_mod = op(ser, other)\n if op is divmod:\n expected_div, expected_mod = ser // other, ser % other\n else:\n expected_div, expected_mod = other // ser, other % ser\n tm.assert_series_equal(result_div, expected_div)\n tm.assert_series_equal(result_mod, expected_mod)\n else:\n with pytest.raises(exc):\n divmod(ser, other)\n\n\nclass BaseArithmeticOpsTests(BaseOpsUtil):\n """\n Various Series and DataFrame arithmetic ops methods.\n\n Subclasses 
supporting various ops should set the class variables\n to indicate that they support ops of that kind\n\n * series_scalar_exc = TypeError\n * frame_scalar_exc = TypeError\n * series_array_exc = TypeError\n * divmod_exc = TypeError\n """\n\n series_scalar_exc: type[Exception] | None = TypeError\n frame_scalar_exc: type[Exception] | None = TypeError\n series_array_exc: type[Exception] | None = TypeError\n divmod_exc: type[Exception] | None = TypeError\n\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators):\n # series & scalar\n if all_arithmetic_operators == "__rmod__" and is_string_dtype(data.dtype):\n pytest.skip("Skip testing Python string formatting")\n\n op_name = all_arithmetic_operators\n ser = pd.Series(data)\n self.check_opname(ser, op_name, ser.iloc[0])\n\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):\n # frame & scalar\n if all_arithmetic_operators == "__rmod__" and is_string_dtype(data.dtype):\n pytest.skip("Skip testing Python string formatting")\n\n op_name = all_arithmetic_operators\n df = pd.DataFrame({"A": data})\n self.check_opname(df, op_name, data[0])\n\n def test_arith_series_with_array(self, data, all_arithmetic_operators):\n # ndarray & other series\n op_name = all_arithmetic_operators\n ser = pd.Series(data)\n self.check_opname(ser, op_name, pd.Series([ser.iloc[0]] * len(ser)))\n\n def test_divmod(self, data):\n ser = pd.Series(data)\n self._check_divmod_op(ser, divmod, 1)\n self._check_divmod_op(1, ops.rdivmod, ser)\n\n def test_divmod_series_array(self, data, data_for_twos):\n ser = pd.Series(data)\n self._check_divmod_op(ser, divmod, data)\n\n other = data_for_twos\n self._check_divmod_op(other, ops.rdivmod, ser)\n\n other = pd.Series(other)\n self._check_divmod_op(other, ops.rdivmod, ser)\n\n def test_add_series_with_extension_array(self, data):\n # Check adding an ExtensionArray to a Series of the same dtype matches\n # the behavior of adding the arrays directly and then wrapping in a\n # 
Series.\n\n ser = pd.Series(data)\n\n exc = self._get_expected_exception("__add__", ser, data)\n if exc is not None:\n with pytest.raises(exc):\n ser + data\n return\n\n result = ser + data\n expected = pd.Series(data + data)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame, pd.Index])\n @pytest.mark.parametrize(\n "op_name",\n [\n x\n for x in tm.arithmetic_dunder_methods + tm.comparison_dunder_methods\n if not x.startswith("__r")\n ],\n )\n def test_direct_arith_with_ndframe_returns_not_implemented(\n self, data, box, op_name\n ):\n # EAs should return NotImplemented for ops with Series/DataFrame/Index\n # Pandas takes care of unboxing the series and calling the EA's op.\n other = box(data)\n\n if hasattr(data, op_name):\n result = getattr(data, op_name)(other)\n assert result is NotImplemented\n\n\nclass BaseComparisonOpsTests(BaseOpsUtil):\n """Various Series and DataFrame comparison ops methods."""\n\n def _compare_other(self, ser: pd.Series, data, op, other):\n if op.__name__ in ["eq", "ne"]:\n # comparison should match point-wise comparisons\n result = op(ser, other)\n expected = ser.combine(other, op)\n expected = self._cast_pointwise_result(op.__name__, ser, other, expected)\n tm.assert_series_equal(result, expected)\n\n else:\n exc = None\n try:\n result = op(ser, other)\n except Exception as err:\n exc = err\n\n if exc is None:\n # Didn't error, then should match pointwise behavior\n expected = ser.combine(other, op)\n expected = self._cast_pointwise_result(\n op.__name__, ser, other, expected\n )\n tm.assert_series_equal(result, expected)\n else:\n with pytest.raises(type(exc)):\n ser.combine(other, op)\n\n def test_compare_scalar(self, data, comparison_op):\n ser = pd.Series(data)\n self._compare_other(ser, data, comparison_op, 0)\n\n def test_compare_array(self, data, comparison_op):\n ser = pd.Series(data)\n other = pd.Series([data[0]] * len(data), dtype=data.dtype)\n self._compare_other(ser, 
data, comparison_op, other)\n\n\nclass BaseUnaryOpsTests(BaseOpsUtil):\n def test_invert(self, data):\n ser = pd.Series(data, name="name")\n try:\n # 10 is an arbitrary choice here, just avoid iterating over\n # the whole array to trim test runtime\n [~x for x in data[:10]]\n except TypeError:\n # scalars don't support invert -> we don't expect the vectorized\n # operation to succeed\n with pytest.raises(TypeError):\n ~ser\n with pytest.raises(TypeError):\n ~data\n else:\n # Note we do not reuse the pointwise result to construct expected\n # because python semantics for negating bools are weird see GH#54569\n result = ~ser\n expected = pd.Series(~data, name="name")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])\n def test_unary_ufunc_dunder_equivalence(self, data, ufunc):\n # the dunder __pos__ works if and only if np.positive works,\n # same for __neg__/np.negative and __abs__/np.abs\n attr = {np.positive: "__pos__", np.negative: "__neg__", np.abs: "__abs__"}[\n ufunc\n ]\n\n exc = None\n try:\n result = getattr(data, attr)()\n except Exception as err:\n exc = err\n\n # if __pos__ raised, then so should the ufunc\n with pytest.raises((type(exc), TypeError)):\n ufunc(data)\n else:\n alt = ufunc(data)\n tm.assert_extension_array_equal(result, alt)\n
.venv\Lib\site-packages\pandas\tests\extension\base\ops.py
ops.py
Python
10,760
0.95
0.179931
0.1875
vue-tools
348
2023-12-02T13:49:59.070605
BSD-3-Clause
true
9f69ffe505932407bbe2e8a2484b069e
import io\n\nimport pytest\n\nimport pandas as pd\n\n\nclass BasePrintingTests:\n """Tests checking the formatting of your EA when printed."""\n\n @pytest.mark.parametrize("size", ["big", "small"])\n def test_array_repr(self, data, size):\n if size == "small":\n data = data[:5]\n else:\n data = type(data)._concat_same_type([data] * 5)\n\n result = repr(data)\n assert type(data).__name__ in result\n assert f"Length: {len(data)}" in result\n assert str(data.dtype) in result\n if size == "big":\n assert "..." in result\n\n def test_array_repr_unicode(self, data):\n result = str(data)\n assert isinstance(result, str)\n\n def test_series_repr(self, data):\n ser = pd.Series(data)\n assert data.dtype.name in repr(ser)\n\n def test_dataframe_repr(self, data):\n df = pd.DataFrame({"A": data})\n repr(df)\n\n def test_dtype_name_in_info(self, data):\n buf = io.StringIO()\n pd.DataFrame({"A": data}).info(buf=buf)\n result = buf.getvalue()\n assert data.dtype.name in result\n
.venv\Lib\site-packages\pandas\tests\extension\base\printing.py
printing.py
Python
1,109
0.85
0.195122
0
node-utils
919
2024-07-19T08:51:46.782627
BSD-3-Clause
true
9319d901392e2d6297e4c5c039949082
from typing import final\n\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import is_numeric_dtype\n\n\nclass BaseReduceTests:\n """\n Reduction specific tests. Generally these only\n make sense for numeric/boolean operations.\n """\n\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n # Specify if we expect this reduction to succeed.\n return False\n\n def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):\n # We perform the same operation on the np.float64 data and check\n # that the results match. Override if you need to cast to something\n # other than float64.\n res_op = getattr(ser, op_name)\n\n try:\n alt = ser.astype("float64")\n except (TypeError, ValueError):\n # e.g. Interval can't cast (TypeError), StringArray can't cast\n # (ValueError), so let's cast to object and do\n # the reduction pointwise\n alt = ser.astype(object)\n\n exp_op = getattr(alt, op_name)\n if op_name == "count":\n result = res_op()\n expected = exp_op()\n else:\n result = res_op(skipna=skipna)\n expected = exp_op(skipna=skipna)\n tm.assert_almost_equal(result, expected)\n\n def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):\n # Find the expected dtype when the given reduction is done on a DataFrame\n # column with this array. The default assumes float64-like behavior,\n # i.e. retains the dtype.\n return arr.dtype\n\n # We anticipate that authors should not need to override check_reduce_frame,\n # but should be able to do any necessary overriding in\n # _get_expected_reduction_dtype. 
If you have a use case where this\n # does not hold, please let us know at github.com/pandas-dev/pandas/issues.\n @final\n def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):\n # Check that the 2D reduction done in a DataFrame reduction "looks like"\n # a wrapped version of the 1D reduction done by Series.\n arr = ser.array\n df = pd.DataFrame({"a": arr})\n\n kwargs = {"ddof": 1} if op_name in ["var", "std"] else {}\n\n cmp_dtype = self._get_expected_reduction_dtype(arr, op_name, skipna)\n\n # The DataFrame method just calls arr._reduce with keepdims=True,\n # so this first check is perfunctory.\n result1 = arr._reduce(op_name, skipna=skipna, keepdims=True, **kwargs)\n result2 = getattr(df, op_name)(skipna=skipna, **kwargs).array\n tm.assert_extension_array_equal(result1, result2)\n\n # Check that the 2D reduction looks like a wrapped version of the\n # 1D reduction\n if not skipna and ser.isna().any():\n expected = pd.array([pd.NA], dtype=cmp_dtype)\n else:\n exp_value = getattr(ser.dropna(), op_name)()\n expected = pd.array([exp_value], dtype=cmp_dtype)\n\n tm.assert_extension_array_equal(result1, expected)\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):\n op_name = all_boolean_reductions\n ser = pd.Series(data)\n\n if not self._supports_reduction(ser, op_name):\n # TODO: the message being checked here isn't actually checking anything\n msg = (\n "[Cc]annot perform|Categorical is not ordered for operation|"\n "does not support reduction|"\n )\n\n with pytest.raises(TypeError, match=msg):\n getattr(ser, op_name)(skipna=skipna)\n\n else:\n self.check_reduce(ser, op_name, skipna)\n\n @pytest.mark.filterwarnings("ignore::RuntimeWarning")\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):\n op_name = all_numeric_reductions\n ser = pd.Series(data)\n\n if not self._supports_reduction(ser, 
op_name):\n # TODO: the message being checked here isn't actually checking anything\n msg = (\n "[Cc]annot perform|Categorical is not ordered for operation|"\n "does not support reduction|"\n )\n\n with pytest.raises(TypeError, match=msg):\n getattr(ser, op_name)(skipna=skipna)\n\n else:\n # min/max with empty produce numpy warnings\n self.check_reduce(ser, op_name, skipna)\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_frame(self, data, all_numeric_reductions, skipna):\n op_name = all_numeric_reductions\n ser = pd.Series(data)\n if not is_numeric_dtype(ser.dtype):\n pytest.skip(f"{ser.dtype} is not numeric dtype")\n\n if op_name in ["count", "kurt", "sem"]:\n pytest.skip(f"{op_name} not an array method")\n\n if not self._supports_reduction(ser, op_name):\n pytest.skip(f"Reduction {op_name} not supported for this dtype")\n\n self.check_reduce_frame(ser, op_name, skipna)\n\n\n# TODO(3.0): remove BaseNoReduceTests, BaseNumericReduceTests,\n# BaseBooleanReduceTests\nclass BaseNoReduceTests(BaseReduceTests):\n """we don't define any reductions"""\n\n\nclass BaseNumericReduceTests(BaseReduceTests):\n # For backward compatibility only, this only runs the numeric reductions\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n if op_name in ["any", "all"]:\n pytest.skip("These are tested in BaseBooleanReduceTests")\n return True\n\n\nclass BaseBooleanReduceTests(BaseReduceTests):\n # For backward compatibility only, this only runs the numeric reductions\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n if op_name not in ["any", "all"]:\n pytest.skip("These are tested in BaseNumericReduceTests")\n return True\n
.venv\Lib\site-packages\pandas\tests\extension\base\reduce.py
reduce.py
Python
5,968
0.95
0.196078
0.225
react-lib
241
2023-12-20T21:23:41.548759
BSD-3-Clause
true
1bd4741896acae22060a9f7c22086237
import itertools\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.extensions import ExtensionArray\nfrom pandas.core.internals.blocks import EABackedBlock\n\n\nclass BaseReshapingTests:\n """Tests for reshaping and concatenation."""\n\n @pytest.mark.parametrize("in_frame", [True, False])\n def test_concat(self, data, in_frame):\n wrapped = pd.Series(data)\n if in_frame:\n wrapped = pd.DataFrame(wrapped)\n result = pd.concat([wrapped, wrapped], ignore_index=True)\n\n assert len(result) == len(data) * 2\n\n if in_frame:\n dtype = result.dtypes[0]\n else:\n dtype = result.dtype\n\n assert dtype == data.dtype\n if hasattr(result._mgr, "blocks"):\n assert isinstance(result._mgr.blocks[0], EABackedBlock)\n assert isinstance(result._mgr.arrays[0], ExtensionArray)\n\n @pytest.mark.parametrize("in_frame", [True, False])\n def test_concat_all_na_block(self, data_missing, in_frame):\n valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])\n na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])\n if in_frame:\n valid_block = pd.DataFrame({"a": valid_block})\n na_block = pd.DataFrame({"a": na_block})\n result = pd.concat([valid_block, na_block])\n if in_frame:\n expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})\n tm.assert_frame_equal(result, expected)\n else:\n expected = pd.Series(data_missing.take([1, 1, 0, 0]))\n tm.assert_series_equal(result, expected)\n\n def test_concat_mixed_dtypes(self, data):\n # https://github.com/pandas-dev/pandas/issues/20762\n df1 = pd.DataFrame({"A": data[:3]})\n df2 = pd.DataFrame({"A": [1, 2, 3]})\n df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")\n dfs = [df1, df2, df3]\n\n # dataframes\n result = pd.concat(dfs)\n expected = pd.concat([x.astype(object) for x in dfs])\n tm.assert_frame_equal(result, expected)\n\n # series\n result = pd.concat([x["A"] for x in dfs])\n expected = pd.concat([x["A"].astype(object) for x in dfs])\n 
tm.assert_series_equal(result, expected)\n\n # simple test for just EA and one other\n result = pd.concat([df1, df2.astype(object)])\n expected = pd.concat([df1.astype("object"), df2.astype("object")])\n tm.assert_frame_equal(result, expected)\n\n result = pd.concat([df1["A"], df2["A"].astype(object)])\n expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")])\n tm.assert_series_equal(result, expected)\n\n def test_concat_columns(self, data, na_value):\n df1 = pd.DataFrame({"A": data[:3]})\n df2 = pd.DataFrame({"B": [1, 2, 3]})\n\n expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]})\n result = pd.concat([df1, df2], axis=1)\n tm.assert_frame_equal(result, expected)\n result = pd.concat([df1["A"], df2["B"]], axis=1)\n tm.assert_frame_equal(result, expected)\n\n # non-aligned\n df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3])\n expected = pd.DataFrame(\n {\n "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),\n "B": [np.nan, 1, 2, 3],\n }\n )\n\n result = pd.concat([df1, df2], axis=1)\n tm.assert_frame_equal(result, expected)\n result = pd.concat([df1["A"], df2["B"]], axis=1)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_extension_arrays_copy_false(self, data, na_value):\n # GH 20756\n df1 = pd.DataFrame({"A": data[:3]})\n df2 = pd.DataFrame({"B": data[3:7]})\n expected = pd.DataFrame(\n {\n "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),\n "B": data[3:7],\n }\n )\n result = pd.concat([df1, df2], axis=1, copy=False)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_with_reindex(self, data):\n # GH-33027\n a = pd.DataFrame({"a": data[:5]})\n b = pd.DataFrame({"b": data[:5]})\n result = pd.concat([a, b], ignore_index=True)\n expected = pd.DataFrame(\n {\n "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True),\n "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_align(self, data, 
na_value):\n a = data[:3]\n b = data[2:5]\n r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))\n\n # Assumes that the ctor can take a list of scalars of the type\n e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype))\n e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype))\n tm.assert_series_equal(r1, e1)\n tm.assert_series_equal(r2, e2)\n\n def test_align_frame(self, data, na_value):\n a = data[:3]\n b = data[2:5]\n r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3]))\n\n # Assumes that the ctor can take a list of scalars of the type\n e1 = pd.DataFrame(\n {"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)}\n )\n e2 = pd.DataFrame(\n {"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)}\n )\n tm.assert_frame_equal(r1, e1)\n tm.assert_frame_equal(r2, e2)\n\n def test_align_series_frame(self, data, na_value):\n # https://github.com/pandas-dev/pandas/issues/20576\n ser = pd.Series(data, name="a")\n df = pd.DataFrame({"col": np.arange(len(ser) + 1)})\n r1, r2 = ser.align(df)\n\n e1 = pd.Series(\n data._from_sequence(list(data) + [na_value], dtype=data.dtype),\n name=ser.name,\n )\n\n tm.assert_series_equal(r1, e1)\n tm.assert_frame_equal(r2, df)\n\n def test_set_frame_expand_regular_with_extension(self, data):\n df = pd.DataFrame({"A": [1] * len(data)})\n df["B"] = data\n expected = pd.DataFrame({"A": [1] * len(data), "B": data})\n tm.assert_frame_equal(df, expected)\n\n def test_set_frame_expand_extension_with_regular(self, data):\n df = pd.DataFrame({"A": data})\n df["B"] = [1] * len(data)\n expected = pd.DataFrame({"A": data, "B": [1] * len(data)})\n tm.assert_frame_equal(df, expected)\n\n def test_set_frame_overwrite_object(self, data):\n # https://github.com/pandas-dev/pandas/issues/20555\n df = pd.DataFrame({"A": [1] * len(data)}, dtype=object)\n df["A"] = data\n assert df.dtypes["A"] == data.dtype\n\n def test_merge(self, data, na_value):\n # GH-20743\n df1 
= pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]})\n df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]})\n\n res = pd.merge(df1, df2)\n exp = pd.DataFrame(\n {\n "int1": [1, 1, 2],\n "int2": [1, 2, 3],\n "key": [0, 0, 1],\n "ext": data._from_sequence(\n [data[0], data[0], data[1]], dtype=data.dtype\n ),\n }\n )\n tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])\n\n res = pd.merge(df1, df2, how="outer")\n exp = pd.DataFrame(\n {\n "int1": [1, 1, 2, 3, np.nan],\n "int2": [1, 2, 3, np.nan, 4],\n "key": [0, 0, 1, 2, 3],\n "ext": data._from_sequence(\n [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype\n ),\n }\n )\n tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])\n\n def test_merge_on_extension_array(self, data):\n # GH 23020\n a, b = data[:2]\n key = type(data)._from_sequence([a, b], dtype=data.dtype)\n\n df = pd.DataFrame({"key": key, "val": [1, 2]})\n result = pd.merge(df, df, on="key")\n expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]})\n tm.assert_frame_equal(result, expected)\n\n # order\n result = pd.merge(df.iloc[[1, 0]], df, on="key")\n expected = expected.iloc[[1, 0]].reset_index(drop=True)\n tm.assert_frame_equal(result, expected)\n\n def test_merge_on_extension_array_duplicates(self, data):\n # GH 23020\n a, b = data[:2]\n key = type(data)._from_sequence([a, b, a], dtype=data.dtype)\n df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]})\n df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]})\n\n result = pd.merge(df1, df2, on="key")\n expected = pd.DataFrame(\n {\n "key": key.take([0, 0, 1, 2, 2]),\n "val_x": [1, 1, 2, 3, 3],\n "val_y": [1, 3, 2, 1, 3],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n @pytest.mark.parametrize(\n "columns",\n [\n ["A", "B"],\n pd.MultiIndex.from_tuples(\n [("A", "a"), ("A", "b")], names=["outer", "inner"]\n ),\n ],\n )\n 
@pytest.mark.parametrize("future_stack", [True, False])\n def test_stack(self, data, columns, future_stack):\n df = pd.DataFrame({"A": data[:5], "B": data[:5]})\n df.columns = columns\n result = df.stack(future_stack=future_stack)\n expected = df.astype(object).stack(future_stack=future_stack)\n # we need a second astype(object), in case the constructor inferred\n # object -> specialized, as is done for period.\n expected = expected.astype(object)\n\n if isinstance(expected, pd.Series):\n assert result.dtype == df.iloc[:, 0].dtype\n else:\n assert all(result.dtypes == df.iloc[:, 0].dtype)\n\n result = result.astype(object)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "index",\n [\n # Two levels, uniform.\n pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]),\n # non-uniform\n pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),\n # three levels, non-uniform\n pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]),\n pd.MultiIndex.from_tuples(\n [\n ("A", "a", 1),\n ("A", "b", 0),\n ("A", "a", 0),\n ("B", "a", 0),\n ("B", "c", 1),\n ]\n ),\n ],\n )\n @pytest.mark.parametrize("obj", ["series", "frame"])\n def test_unstack(self, data, index, obj):\n data = data[: len(index)]\n if obj == "series":\n ser = pd.Series(data, index=index)\n else:\n ser = pd.DataFrame({"A": data, "B": data}, index=index)\n\n n = index.nlevels\n levels = list(range(n))\n # [0, 1, 2]\n # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]\n combinations = itertools.chain.from_iterable(\n itertools.permutations(levels, i) for i in range(1, n)\n )\n\n for level in combinations:\n result = ser.unstack(level=level)\n assert all(\n isinstance(result[col].array, type(data)) for col in result.columns\n )\n\n if obj == "series":\n # We should get the same result with to_frame+unstack+droplevel\n df = ser.to_frame()\n\n alt = df.unstack(level=level).droplevel(0, axis=1)\n tm.assert_frame_equal(result, alt)\n\n obj_ser 
= ser.astype(object)\n\n expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value)\n if obj == "series":\n assert (expected.dtypes == object).all()\n\n result = result.astype(object)\n tm.assert_frame_equal(result, expected)\n\n def test_ravel(self, data):\n # as long as EA is 1D-only, ravel is a no-op\n result = data.ravel()\n assert type(result) == type(data)\n\n if data.dtype._is_immutable:\n pytest.skip(f"test_ravel assumes mutability and {data.dtype} is immutable")\n\n # Check that we have a view, not a copy\n result[0] = result[1]\n assert data[0] == data[1]\n\n def test_transpose(self, data):\n result = data.transpose()\n assert type(result) == type(data)\n\n # check we get a new object\n assert result is not data\n\n # If we ever _did_ support 2D, shape should be reversed\n assert result.shape == data.shape[::-1]\n\n if data.dtype._is_immutable:\n pytest.skip(\n f"test_transpose assumes mutability and {data.dtype} is immutable"\n )\n\n # Check that we have a view, not a copy\n result[0] = result[1]\n assert data[0] == data[1]\n\n def test_transpose_frame(self, data):\n df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"])\n result = df.T\n expected = pd.DataFrame(\n {\n "a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype),\n "b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype),\n "c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype),\n "d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype),\n },\n index=["A", "B"],\n )\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(np.transpose(np.transpose(df)), df)\n tm.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]])\n
.venv\Lib\site-packages\pandas\tests\extension\base\reshaping.py
reshaping.py
Python
13,931
0.95
0.108179
0.087227
awesome-app
992
2025-05-10T17:01:59.130198
BSD-3-Clause
true
e2c140416984c3d27319a757f2db3975
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass BaseSetitemTests:\n @pytest.fixture(\n params=[\n lambda x: x.index,\n lambda x: list(x.index),\n lambda x: slice(None),\n lambda x: slice(0, len(x)),\n lambda x: range(len(x)),\n lambda x: list(range(len(x))),\n lambda x: np.ones(len(x), dtype=bool),\n ],\n ids=[\n "index",\n "list[index]",\n "null_slice",\n "full_slice",\n "range",\n "list(range)",\n "mask",\n ],\n )\n def full_indexer(self, request):\n """\n Fixture for an indexer to pass to obj.loc to get/set the full length of the\n object.\n\n In some cases, assumes that obj.index is the default RangeIndex.\n """\n return request.param\n\n @pytest.fixture(autouse=True)\n def skip_if_immutable(self, dtype, request):\n if dtype._is_immutable:\n node = request.node\n if node.name.split("[")[0] == "test_is_immutable":\n # This fixture is auto-used, but we want to not-skip\n # test_is_immutable.\n return\n\n # When BaseSetitemTests is mixed into ExtensionTests, we only\n # want this fixture to operate on the tests defined in this\n # class/file.\n defined_in = node.function.__qualname__.split(".")[0]\n if defined_in == "BaseSetitemTests":\n pytest.skip("__setitem__ test not applicable with immutable dtype")\n\n def test_is_immutable(self, data):\n if data.dtype._is_immutable:\n with pytest.raises(TypeError):\n data[0] = data[0]\n else:\n data[0] = data[1]\n assert data[0] == data[1]\n\n def test_setitem_scalar_series(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n data[0] = data[1]\n assert data[0] == data[1]\n\n def test_setitem_sequence(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n original = data.copy()\n\n data[[0, 1]] = [data[1], data[0]]\n assert data[0] == original[1]\n assert data[1] == original[0]\n\n def test_setitem_sequence_mismatched_length_raises(self, data, as_array):\n ser = pd.Series(data)\n original = ser.copy()\n value = [data[0]]\n if 
as_array:\n value = data._from_sequence(value, dtype=data.dtype)\n\n xpr = "cannot set using a {} indexer with a different length"\n with pytest.raises(ValueError, match=xpr.format("list-like")):\n ser[[0, 1]] = value\n # Ensure no modifications made before the exception\n tm.assert_series_equal(ser, original)\n\n with pytest.raises(ValueError, match=xpr.format("slice")):\n ser[slice(3)] = value\n tm.assert_series_equal(ser, original)\n\n def test_setitem_empty_indexer(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n original = data.copy()\n data[np.array([], dtype=int)] = []\n tm.assert_equal(data, original)\n\n def test_setitem_sequence_broadcasts(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n data[[0, 1]] = data[2]\n assert data[0] == data[2]\n assert data[1] == data[2]\n\n @pytest.mark.parametrize("setter", ["loc", "iloc"])\n def test_setitem_scalar(self, data, setter):\n arr = pd.Series(data)\n setter = getattr(arr, setter)\n setter[0] = data[1]\n assert arr[0] == data[1]\n\n def test_setitem_loc_scalar_mixed(self, data):\n df = pd.DataFrame({"A": np.arange(len(data)), "B": data})\n df.loc[0, "B"] = data[1]\n assert df.loc[0, "B"] == data[1]\n\n def test_setitem_loc_scalar_single(self, data):\n df = pd.DataFrame({"B": data})\n df.loc[10, "B"] = data[1]\n assert df.loc[10, "B"] == data[1]\n\n def test_setitem_loc_scalar_multiple_homogoneous(self, data):\n df = pd.DataFrame({"A": data, "B": data})\n df.loc[10, "B"] = data[1]\n assert df.loc[10, "B"] == data[1]\n\n def test_setitem_iloc_scalar_mixed(self, data):\n df = pd.DataFrame({"A": np.arange(len(data)), "B": data})\n df.iloc[0, 1] = data[1]\n assert df.loc[0, "B"] == data[1]\n\n def test_setitem_iloc_scalar_single(self, data):\n df = pd.DataFrame({"B": data})\n df.iloc[10, 0] = data[1]\n assert df.loc[10, "B"] == data[1]\n\n def test_setitem_iloc_scalar_multiple_homogoneous(self, data):\n df = pd.DataFrame({"A": data, "B": data})\n df.iloc[10, 1] = 
data[1]\n assert df.loc[10, "B"] == data[1]\n\n @pytest.mark.parametrize(\n "mask",\n [\n np.array([True, True, True, False, False]),\n pd.array([True, True, True, False, False], dtype="boolean"),\n pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),\n ],\n ids=["numpy-array", "boolean-array", "boolean-array-na"],\n )\n def test_setitem_mask(self, data, mask, box_in_series):\n arr = data[:5].copy()\n expected = arr.take([0, 0, 0, 3, 4])\n if box_in_series:\n arr = pd.Series(arr)\n expected = pd.Series(expected)\n arr[mask] = data[0]\n tm.assert_equal(expected, arr)\n\n def test_setitem_mask_raises(self, data, box_in_series):\n # wrong length\n mask = np.array([True, False])\n\n if box_in_series:\n data = pd.Series(data)\n\n with pytest.raises(IndexError, match="wrong length"):\n data[mask] = data[0]\n\n mask = pd.array(mask, dtype="boolean")\n with pytest.raises(IndexError, match="wrong length"):\n data[mask] = data[0]\n\n def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):\n mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")\n mask[:3] = True\n mask[3:5] = pd.NA\n\n if box_in_series:\n data = pd.Series(data)\n\n data[mask] = data[0]\n\n assert (data[:3] == data[0]).all()\n\n @pytest.mark.parametrize(\n "idx",\n [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],\n ids=["list", "integer-array", "numpy-array"],\n )\n def test_setitem_integer_array(self, data, idx, box_in_series):\n arr = data[:5].copy()\n expected = data.take([0, 0, 0, 3, 4])\n\n if box_in_series:\n arr = pd.Series(arr)\n expected = pd.Series(expected)\n\n arr[idx] = arr[0]\n tm.assert_equal(arr, expected)\n\n @pytest.mark.parametrize(\n "idx, box_in_series",\n [\n ([0, 1, 2, pd.NA], False),\n pytest.param(\n [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")\n ),\n (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),\n (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),\n ],\n ids=["list-False", "list-True", 
"integer-array-False", "integer-array-True"],\n )\n def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):\n arr = data.copy()\n\n # TODO(xfail) this raises KeyError about labels not found (it tries label-based)\n # for list of labels with Series\n if box_in_series:\n arr = pd.Series(data, index=[chr(100 + i) for i in range(len(data))])\n\n msg = "Cannot index with an integer indexer containing NA values"\n with pytest.raises(ValueError, match=msg):\n arr[idx] = arr[0]\n\n @pytest.mark.parametrize("as_callable", [True, False])\n @pytest.mark.parametrize("setter", ["loc", None])\n def test_setitem_mask_aligned(self, data, as_callable, setter):\n ser = pd.Series(data)\n mask = np.zeros(len(data), dtype=bool)\n mask[:2] = True\n\n if as_callable:\n mask2 = lambda x: mask\n else:\n mask2 = mask\n\n if setter:\n # loc\n target = getattr(ser, setter)\n else:\n # Series.__setitem__\n target = ser\n\n target[mask2] = data[5:7]\n\n ser[mask2] = data[5:7]\n assert ser[0] == data[5]\n assert ser[1] == data[6]\n\n @pytest.mark.parametrize("setter", ["loc", None])\n def test_setitem_mask_broadcast(self, data, setter):\n ser = pd.Series(data)\n mask = np.zeros(len(data), dtype=bool)\n mask[:2] = True\n\n if setter: # loc\n target = getattr(ser, setter)\n else: # __setitem__\n target = ser\n\n target[mask] = data[10]\n assert ser[0] == data[10]\n assert ser[1] == data[10]\n\n def test_setitem_expand_columns(self, data):\n df = pd.DataFrame({"A": data})\n result = df.copy()\n result["B"] = 1\n expected = pd.DataFrame({"A": data, "B": [1] * len(data)})\n tm.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.loc[:, "B"] = 1\n tm.assert_frame_equal(result, expected)\n\n # overwrite with new type\n result["B"] = data\n expected = pd.DataFrame({"A": data, "B": data})\n tm.assert_frame_equal(result, expected)\n\n def test_setitem_expand_with_extension(self, data):\n df = pd.DataFrame({"A": [1] * len(data)})\n result = df.copy()\n result["B"] = 
data\n expected = pd.DataFrame({"A": [1] * len(data), "B": data})\n tm.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.loc[:, "B"] = data\n tm.assert_frame_equal(result, expected)\n\n def test_setitem_frame_invalid_length(self, data):\n df = pd.DataFrame({"A": [1] * len(data)})\n xpr = (\n rf"Length of values \({len(data[:5])}\) "\n rf"does not match length of index \({len(df)}\)"\n )\n with pytest.raises(ValueError, match=xpr):\n df["B"] = data[:5]\n\n def test_setitem_tuple_index(self, data):\n ser = pd.Series(data[:2], index=[(0, 0), (0, 1)])\n expected = pd.Series(data.take([1, 1]), index=ser.index)\n ser[(0, 0)] = data[1]\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_slice(self, data, box_in_series):\n arr = data[:5].copy()\n expected = data.take([0, 0, 0, 3, 4])\n if box_in_series:\n arr = pd.Series(arr)\n expected = pd.Series(expected)\n\n arr[:3] = data[0]\n tm.assert_equal(arr, expected)\n\n def test_setitem_loc_iloc_slice(self, data):\n arr = data[:5].copy()\n s = pd.Series(arr, index=["a", "b", "c", "d", "e"])\n expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)\n\n result = s.copy()\n result.iloc[:3] = data[0]\n tm.assert_equal(result, expected)\n\n result = s.copy()\n result.loc[:"c"] = data[0]\n tm.assert_equal(result, expected)\n\n def test_setitem_slice_mismatch_length_raises(self, data):\n arr = data[:5]\n with pytest.raises(ValueError):\n arr[:1] = arr[:2]\n\n def test_setitem_slice_array(self, data):\n arr = data[:5].copy()\n arr[:5] = data[-5:]\n tm.assert_extension_array_equal(arr, data[-5:])\n\n def test_setitem_scalar_key_sequence_raise(self, data):\n arr = data[:5].copy()\n with pytest.raises(ValueError):\n arr[0] = arr[[0, 1]]\n\n def test_setitem_preserves_views(self, data):\n # GH#28150 setitem shouldn't swap the underlying data\n view1 = data.view()\n view2 = data[:]\n\n data[0] = data[1]\n assert view1[0] == data[1]\n assert view2[0] == data[1]\n\n def 
test_setitem_with_expansion_dataframe_column(self, data, full_indexer):\n # https://github.com/pandas-dev/pandas/issues/32395\n df = expected = pd.DataFrame({0: pd.Series(data)})\n result = pd.DataFrame(index=df.index)\n\n key = full_indexer(df)\n result.loc[key, 0] = df[0]\n\n tm.assert_frame_equal(result, expected)\n\n def test_setitem_with_expansion_row(self, data, na_value):\n df = pd.DataFrame({"data": data[:1]})\n\n df.loc[1, "data"] = data[1]\n expected = pd.DataFrame({"data": data[:2]})\n tm.assert_frame_equal(df, expected)\n\n # https://github.com/pandas-dev/pandas/issues/47284\n df.loc[2, "data"] = na_value\n expected = pd.DataFrame(\n {"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)}\n )\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_series(self, data, full_indexer):\n # https://github.com/pandas-dev/pandas/issues/32395\n ser = pd.Series(data, name="data")\n result = pd.Series(index=ser.index, dtype=object, name="data")\n\n # because result has object dtype, the attempt to do setting inplace\n # is successful, and object dtype is retained\n key = full_indexer(ser)\n result.loc[key] = ser\n\n expected = pd.Series(\n data.astype(object), index=ser.index, name="data", dtype=object\n )\n tm.assert_series_equal(result, expected)\n\n def test_setitem_frame_2d_values(self, data):\n # GH#44514\n df = pd.DataFrame({"A": data})\n\n # Avoiding using_array_manager fixture\n # https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410\n using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager)\n using_copy_on_write = pd.options.mode.copy_on_write\n\n blk_data = df._mgr.arrays[0]\n\n orig = df.copy()\n\n df.iloc[:] = df.copy()\n tm.assert_frame_equal(df, orig)\n\n df.iloc[:-1] = df.iloc[:-1].copy()\n tm.assert_frame_equal(df, orig)\n\n df.iloc[:] = df.values\n tm.assert_frame_equal(df, orig)\n if not using_array_manager and not using_copy_on_write:\n # GH#33457 Check that this setting occurred in-place\n # 
FIXME(ArrayManager): this should work there too\n assert df._mgr.arrays[0] is blk_data\n\n df.iloc[:-1] = df.values[:-1]\n tm.assert_frame_equal(df, orig)\n\n def test_delitem_series(self, data):\n # GH#40763\n ser = pd.Series(data, name="data")\n\n taker = np.arange(len(ser))\n taker = np.delete(taker, 1)\n\n expected = ser[taker]\n del ser[1]\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_invalid(self, data, invalid_scalar):\n msg = "" # messages vary by subclass, so we do not test it\n with pytest.raises((ValueError, TypeError), match=msg):\n data[0] = invalid_scalar\n\n with pytest.raises((ValueError, TypeError), match=msg):\n data[:] = invalid_scalar\n\n def test_setitem_2d_values(self, data):\n # GH50085\n original = data.copy()\n df = pd.DataFrame({"a": data, "b": data})\n df.loc[[0, 1], :] = df.loc[[1, 0], :].values\n assert (df.loc[0, :] == original[1]).all()\n assert (df.loc[1, :] == original[0]).all()\n
.venv\Lib\site-packages\pandas\tests\extension\base\setitem.py
setitem.py
Python
15,075
0.95
0.141907
0.068306
vue-tools
678
2023-10-26T19:19:23.987303
BSD-3-Clause
true
9ac83234d6c3590d5587f716f6746668
"""\nBase test suite for extension arrays.\n\nThese tests are intended for third-party libraries to subclass to validate\nthat their extension arrays and dtypes satisfy the interface. Moving or\nrenaming the tests should not be done lightly.\n\nLibraries are expected to implement a few pytest fixtures to provide data\nfor the tests. The fixtures may be located in either\n\n* The same module as your test class.\n* A ``conftest.py`` in the same directory as your test class.\n\nThe full list of fixtures may be found in the ``conftest.py`` next to this\nfile.\n\n.. code-block:: python\n\n import pytest\n from pandas.tests.extension.base import BaseDtypeTests\n\n\n @pytest.fixture\n def dtype():\n return MyDtype()\n\n\n class TestMyDtype(BaseDtypeTests):\n pass\n\n\nYour class ``TestDtype`` will inherit all the tests defined on\n``BaseDtypeTests``. pytest's fixture discover will supply your ``dtype``\nwherever the test requires it. You're free to implement additional tests.\n\n"""\nfrom pandas.tests.extension.base.accumulate import BaseAccumulateTests\nfrom pandas.tests.extension.base.casting import BaseCastingTests\nfrom pandas.tests.extension.base.constructors import BaseConstructorsTests\nfrom pandas.tests.extension.base.dim2 import ( # noqa: F401\n Dim2CompatTests,\n NDArrayBacked2DTests,\n)\nfrom pandas.tests.extension.base.dtype import BaseDtypeTests\nfrom pandas.tests.extension.base.getitem import BaseGetitemTests\nfrom pandas.tests.extension.base.groupby import BaseGroupbyTests\nfrom pandas.tests.extension.base.index import BaseIndexTests\nfrom pandas.tests.extension.base.interface import BaseInterfaceTests\nfrom pandas.tests.extension.base.io import BaseParsingTests\nfrom pandas.tests.extension.base.methods import BaseMethodsTests\nfrom pandas.tests.extension.base.missing import BaseMissingTests\nfrom pandas.tests.extension.base.ops import ( # noqa: F401\n BaseArithmeticOpsTests,\n BaseComparisonOpsTests,\n BaseOpsUtil,\n BaseUnaryOpsTests,\n)\nfrom 
pandas.tests.extension.base.printing import BasePrintingTests\nfrom pandas.tests.extension.base.reduce import BaseReduceTests\nfrom pandas.tests.extension.base.reshaping import BaseReshapingTests\nfrom pandas.tests.extension.base.setitem import BaseSetitemTests\n\n\n# One test class that you can inherit as an alternative to inheriting all the\n# test classes above.\n# Note 1) this excludes Dim2CompatTests and NDArrayBacked2DTests.\n# Note 2) this uses BaseReduceTests and and _not_ BaseBooleanReduceTests,\n# BaseNoReduceTests, or BaseNumericReduceTests\nclass ExtensionTests(\n BaseAccumulateTests,\n BaseCastingTests,\n BaseConstructorsTests,\n BaseDtypeTests,\n BaseGetitemTests,\n BaseGroupbyTests,\n BaseIndexTests,\n BaseInterfaceTests,\n BaseParsingTests,\n BaseMethodsTests,\n BaseMissingTests,\n BaseArithmeticOpsTests,\n BaseComparisonOpsTests,\n BaseUnaryOpsTests,\n BasePrintingTests,\n BaseReduceTests,\n BaseReshapingTests,\n BaseSetitemTests,\n Dim2CompatTests,\n):\n pass\n\n\ndef __getattr__(name: str):\n import warnings\n\n if name == "BaseNoReduceTests":\n warnings.warn(\n "BaseNoReduceTests is deprecated and will be removed in a "\n "future version. Use BaseReduceTests and override "\n "`_supports_reduction` instead.",\n FutureWarning,\n )\n from pandas.tests.extension.base.reduce import BaseNoReduceTests\n\n return BaseNoReduceTests\n\n elif name == "BaseNumericReduceTests":\n warnings.warn(\n "BaseNumericReduceTests is deprecated and will be removed in a "\n "future version. Use BaseReduceTests and override "\n "`_supports_reduction` instead.",\n FutureWarning,\n )\n from pandas.tests.extension.base.reduce import BaseNumericReduceTests\n\n return BaseNumericReduceTests\n\n elif name == "BaseBooleanReduceTests":\n warnings.warn(\n "BaseBooleanReduceTests is deprecated and will be removed in a "\n "future version. 
Use BaseReduceTests and override "\n "`_supports_reduction` instead.",\n FutureWarning,\n )\n from pandas.tests.extension.base.reduce import BaseBooleanReduceTests\n\n return BaseBooleanReduceTests\n\n raise AttributeError(\n f"module 'pandas.tests.extension.base' has no attribute '{name}'"\n )\n
.venv\Lib\site-packages\pandas\tests\extension\base\__init__.py
__init__.py
Python
4,353
0.95
0.091603
0.065421
python-kit
481
2024-09-02T01:19:56.554840
BSD-3-Clause
true
b92cd1e28a7fbdf863abe0e1dc8b7706
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\accumulate.cpython-313.pyc
accumulate.cpython-313.pyc
Other
2,425
0.8
0.037037
0
python-kit
888
2025-03-12T11:38:46.554293
GPL-3.0
true
a3242bfb6f675fe8d8c9ea75c2c2c804
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
436
0.8
0
0
awesome-app
154
2023-10-23T22:13:50.587015
MIT
true
63d8da3240ed5856a34c9a0cb79d8030
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\casting.cpython-313.pyc
casting.cpython-313.pyc
Other
6,291
0.8
0
0.020408
vue-tools
433
2024-05-28T05:56:43.583845
GPL-3.0
true
ebef2f9eae3cf2dcce3237900eb47485
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\constructors.cpython-313.pyc
constructors.cpython-313.pyc
Other
10,478
0.8
0
0
awesome-app
680
2025-06-19T16:14:43.811561
Apache-2.0
true
f3a7a1c82a60190f5dc076dbcc363aac
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\dim2.cpython-313.pyc
dim2.cpython-313.pyc
Other
19,035
0.8
0.021739
0
python-kit
944
2025-05-24T12:39:26.626394
BSD-3-Clause
true
94eaebd9828855bc6cb1a13963ed59e2
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\dtype.cpython-313.pyc
dtype.cpython-313.pyc
Other
7,752
0.95
0.052632
0
python-kit
339
2024-05-09T02:40:41.556494
MIT
true
2b2297e8664d517fbfb03b7df4c203fb
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\getitem.cpython-313.pyc
getitem.cpython-313.pyc
Other
27,077
0.8
0.007463
0
vue-tools
819
2024-01-25T22:31:21.224619
Apache-2.0
true
48110a2a1d093f29f7d4add40823881a
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\groupby.cpython-313.pyc
groupby.cpython-313.pyc
Other
10,228
0.95
0.010309
0
node-utils
862
2025-05-07T08:26:13.645108
Apache-2.0
true
57d1e0bb6c9e8d5cd90b4059f9d78442
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\index.cpython-313.pyc
index.cpython-313.pyc
Other
1,454
0.8
0.285714
0
awesome-app
380
2024-12-23T04:54:42.789841
GPL-3.0
true
339bc6860164c91e9d9bdacb113fad60
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\interface.cpython-313.pyc
interface.cpython-313.pyc
Other
8,702
0.8
0
0
python-kit
464
2024-09-25T06:39:35.671710
GPL-3.0
true
eba8e11f4a7e267ade87fda28d237179
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\io.cpython-313.pyc
io.cpython-313.pyc
Other
2,433
0.8
0
0
awesome-app
925
2025-05-02T22:13:56.469661
Apache-2.0
true
fa375c6e3f1ad905f0d115f56cfbbcf2
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\methods.cpython-313.pyc
methods.cpython-313.pyc
Other
43,208
0.8
0.003003
0.003106
awesome-app
66
2023-09-02T13:46:25.695412
GPL-3.0
true
77c54be80d7e85fa447ba1f6f6e4e47e
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\missing.cpython-313.pyc
missing.cpython-313.pyc
Other
11,036
0.8
0
0
python-kit
363
2023-10-29T17:20:30.971104
Apache-2.0
true
615bc77fd3c9117f776a00f7ceba28a2
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\ops.cpython-313.pyc
ops.cpython-313.pyc
Other
14,099
0.95
0.009615
0.039604
vue-tools
264
2024-08-29T15:32:17.844032
BSD-3-Clause
true
ce4455d06444390cf588beb8c5443c5f
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\printing.cpython-313.pyc
printing.cpython-313.pyc
Other
2,792
0.8
0
0
python-kit
397
2024-06-22T06:09:11.930211
BSD-3-Clause
true
f9985eaaa90d7293e6692b2e0494cff8
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\reduce.cpython-313.pyc
reduce.cpython-313.pyc
Other
7,239
0.8
0.052632
0
react-lib
994
2023-11-14T16:29:53.282118
GPL-3.0
true
741be725d5d18cfe3a9895eeea58a170
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\reshaping.cpython-313.pyc
reshaping.cpython-313.pyc
Other
22,896
0.8
0.005882
0
awesome-app
224
2024-06-20T13:03:45.733887
Apache-2.0
true
4f87a3f89364eda062182143060659c7
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\setitem.cpython-313.pyc
setitem.cpython-313.pyc
Other
27,021
0.95
0.009009
0
node-utils
667
2023-09-28T12:58:41.885961
MIT
true
f9b27a11942bcfae136ef7d04ad594e0
\n\n
.venv\Lib\site-packages\pandas\tests\extension\base\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
4,188
0.95
0.117647
0.039216
awesome-app
360
2025-01-01T22:37:08.312659
MIT
true
68c1849ce203058ff3c1b8db36953450
from __future__ import annotations\n\nimport datetime as dt\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas.core.dtypes.dtypes import register_extension_dtype\n\nfrom pandas.api.extensions import (\n ExtensionArray,\n ExtensionDtype,\n)\nfrom pandas.api.types import pandas_dtype\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from pandas._typing import (\n Dtype,\n PositionalIndexer,\n )\n\n\n@register_extension_dtype\nclass DateDtype(ExtensionDtype):\n @property\n def type(self):\n return dt.date\n\n @property\n def name(self):\n return "DateDtype"\n\n @classmethod\n def construct_from_string(cls, string: str):\n if not isinstance(string, str):\n raise TypeError(\n f"'construct_from_string' expects a string, got {type(string)}"\n )\n\n if string == cls.__name__:\n return cls()\n else:\n raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")\n\n @classmethod\n def construct_array_type(cls):\n return DateArray\n\n @property\n def na_value(self):\n return dt.date.min\n\n def __repr__(self) -> str:\n return self.name\n\n\nclass DateArray(ExtensionArray):\n def __init__(\n self,\n dates: (\n dt.date\n | Sequence[dt.date]\n | tuple[np.ndarray, np.ndarray, np.ndarray]\n | np.ndarray\n ),\n ) -> None:\n if isinstance(dates, dt.date):\n self._year = np.array([dates.year])\n self._month = np.array([dates.month])\n self._day = np.array([dates.year])\n return\n\n ldates = len(dates)\n if isinstance(dates, list):\n # pre-allocate the arrays since we know the size before hand\n self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)\n self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)\n self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)\n # populate them\n for i, (y, m, d) in enumerate(\n (date.year, date.month, date.day) for date in dates\n ):\n self._year[i] = y\n self._month[i] = m\n self._day[i] = d\n\n elif isinstance(dates, tuple):\n # only support triples\n if ldates 
!= 3:\n raise ValueError("only triples are valid")\n # check if all elements have the same type\n if any(not isinstance(x, np.ndarray) for x in dates):\n raise TypeError("invalid type")\n ly, lm, ld = (len(cast(np.ndarray, d)) for d in dates)\n if not ly == lm == ld:\n raise ValueError(\n f"tuple members must have the same length: {(ly, lm, ld)}"\n )\n self._year = dates[0].astype(np.uint16)\n self._month = dates[1].astype(np.uint8)\n self._day = dates[2].astype(np.uint8)\n\n elif isinstance(dates, np.ndarray) and dates.dtype == "U10":\n self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)\n self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)\n self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)\n\n # error: "object_" object is not iterable\n obj = np.char.split(dates, sep="-")\n for (i,), (y, m, d) in np.ndenumerate(obj): # type: ignore[misc]\n self._year[i] = int(y)\n self._month[i] = int(m)\n self._day[i] = int(d)\n\n else:\n raise TypeError(f"{type(dates)} is not supported")\n\n @property\n def dtype(self) -> ExtensionDtype:\n return DateDtype()\n\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n\n if isinstance(dtype, DateDtype):\n data = self.copy() if copy else self\n else:\n data = self.to_numpy(dtype=dtype, copy=copy, na_value=dt.date.min)\n\n return data\n\n @property\n def nbytes(self) -> int:\n return self._year.nbytes + self._month.nbytes + self._day.nbytes\n\n def __len__(self) -> int:\n return len(self._year) # all 3 arrays are enforced to have the same length\n\n def __getitem__(self, item: PositionalIndexer):\n if isinstance(item, int):\n return dt.date(self._year[item], self._month[item], self._day[item])\n else:\n raise NotImplementedError("only ints are supported as indexes")\n\n def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None:\n if not isinstance(key, int):\n raise NotImplementedError("only ints are supported as indexes")\n\n if not isinstance(value, dt.date):\n raise 
TypeError("you can only set datetime.date types")\n\n self._year[key] = value.year\n self._month[key] = value.month\n self._day[key] = value.day\n\n def __repr__(self) -> str:\n return f"DateArray{list(zip(self._year, self._month, self._day))}"\n\n def copy(self) -> DateArray:\n return DateArray((self._year.copy(), self._month.copy(), self._day.copy()))\n\n def isna(self) -> np.ndarray:\n return np.logical_and(\n np.logical_and(\n self._year == dt.date.min.year, self._month == dt.date.min.month\n ),\n self._day == dt.date.min.day,\n )\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):\n if isinstance(scalars, dt.date):\n raise TypeError\n elif isinstance(scalars, DateArray):\n if dtype is not None:\n return scalars.astype(dtype, copy=copy)\n if copy:\n return scalars.copy()\n return scalars[:]\n elif isinstance(scalars, np.ndarray):\n scalars = scalars.astype("U10") # 10 chars for yyyy-mm-dd\n return DateArray(scalars)\n
.venv\Lib\site-packages\pandas\tests\extension\date\array.py
array.py
Python
5,971
0.95
0.223404
0.03268
awesome-app
955
2024-03-21T09:08:06.992892
BSD-3-Clause
true
b4ad43a6bec8bc714e8fcc035fe95e27
from pandas.tests.extension.date.array import (\n DateArray,\n DateDtype,\n)\n\n__all__ = ["DateArray", "DateDtype"]\n
.venv\Lib\site-packages\pandas\tests\extension\date\__init__.py
__init__.py
Python
118
0.85
0
0
vue-tools
839
2025-04-09T20:52:44.565028
Apache-2.0
true
7a56c0a417a82eb1a44a646b278cbbdd
\n\n
.venv\Lib\site-packages\pandas\tests\extension\date\__pycache__\array.cpython-313.pyc
array.cpython-313.pyc
Other
11,105
0.8
0
0.031746
awesome-app
923
2023-08-16T22:37:46.395736
Apache-2.0
true
8f0ed4ee99caa8bbec296e4435ed6ce4
\n\n
.venv\Lib\site-packages\pandas\tests\extension\date\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
333
0.7
0
0
react-lib
63
2024-09-17T11:32:09.783923
BSD-3-Clause
true
a3a71b05f5eada46793d54d16a50eed4
from __future__ import annotations\n\nimport decimal\nimport numbers\nimport sys\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.core.dtypes.base import ExtensionDtype\nfrom pandas.core.dtypes.common import (\n is_dtype_equal,\n is_float,\n is_integer,\n pandas_dtype,\n)\n\nimport pandas as pd\nfrom pandas.api.extensions import (\n no_default,\n register_extension_dtype,\n)\nfrom pandas.api.types import (\n is_list_like,\n is_scalar,\n)\nfrom pandas.core import arraylike\nfrom pandas.core.algorithms import value_counts_internal as value_counts\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import (\n ExtensionArray,\n ExtensionScalarOpsMixin,\n)\nfrom pandas.core.indexers import check_array_indexer\n\nif TYPE_CHECKING:\n from pandas._typing import type_t\n\n\n@register_extension_dtype\nclass DecimalDtype(ExtensionDtype):\n type = decimal.Decimal\n name = "decimal"\n na_value = decimal.Decimal("NaN")\n _metadata = ("context",)\n\n def __init__(self, context=None) -> None:\n self.context = context or decimal.getcontext()\n\n def __repr__(self) -> str:\n return f"DecimalDtype(context={self.context})"\n\n @classmethod\n def construct_array_type(cls) -> type_t[DecimalArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n return DecimalArray\n\n @property\n def _is_numeric(self) -> bool:\n return True\n\n\nclass DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):\n __array_priority__ = 1000\n\n def __init__(self, values, dtype=None, copy=False, context=None) -> None:\n for i, val in enumerate(values):\n if is_float(val) or is_integer(val):\n if np.isnan(val):\n values[i] = DecimalDtype.na_value\n else:\n # error: Argument 1 has incompatible type "float | int |\n # integer[Any]"; expected "Decimal | float | str | tuple[int,\n # Sequence[int], int]"\n values[i] = DecimalDtype.type(val) # type: ignore[arg-type]\n elif not isinstance(val, decimal.Decimal):\n raise 
TypeError("All values must be of type " + str(decimal.Decimal))\n values = np.asarray(values, dtype=object)\n\n self._data = values\n # Some aliases for common attribute names to ensure pandas supports\n # these\n self._items = self.data = self._data\n # those aliases are currently not working due to assumptions\n # in internal code (GH-20735)\n # self._values = self.values = self.data\n self._dtype = DecimalDtype(context)\n\n @property\n def dtype(self):\n return self._dtype\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype=None, copy=False):\n return cls(scalars)\n\n @classmethod\n def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):\n return cls._from_sequence(\n [decimal.Decimal(x) for x in strings], dtype=dtype, copy=copy\n )\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values)\n\n _HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)\n\n def to_numpy(\n self,\n dtype=None,\n copy: bool = False,\n na_value: object = no_default,\n decimals=None,\n ) -> np.ndarray:\n result = np.asarray(self, dtype=dtype)\n if decimals is not None:\n result = np.asarray([round(x, decimals) for x in result])\n return result\n\n def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\n #\n if not all(\n isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs\n ):\n return NotImplemented\n\n result = arraylike.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n if result is not NotImplemented:\n # e.g. 
test_array_ufunc_series_scalar_other\n return result\n\n if "out" in kwargs:\n return arraylike.dispatch_ufunc_with_out(\n self, ufunc, method, *inputs, **kwargs\n )\n\n inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs)\n result = getattr(ufunc, method)(*inputs, **kwargs)\n\n if method == "reduce":\n result = arraylike.dispatch_reduction_ufunc(\n self, ufunc, method, *inputs, **kwargs\n )\n if result is not NotImplemented:\n return result\n\n def reconstruct(x):\n if isinstance(x, (decimal.Decimal, numbers.Number)):\n return x\n else:\n return type(self)._from_sequence(x, dtype=self.dtype)\n\n if ufunc.nout > 1:\n return tuple(reconstruct(x) for x in result)\n else:\n return reconstruct(result)\n\n def __getitem__(self, item):\n if isinstance(item, numbers.Integral):\n return self._data[item]\n else:\n # array, slice.\n item = pd.api.indexers.check_array_indexer(self, item)\n return type(self)(self._data[item])\n\n def take(self, indexer, allow_fill=False, fill_value=None):\n from pandas.api.extensions import take\n\n data = self._data\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n\n result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)\n return self._from_sequence(result, dtype=self.dtype)\n\n def copy(self):\n return type(self)(self._data.copy(), dtype=self.dtype)\n\n def astype(self, dtype, copy=True):\n if is_dtype_equal(dtype, self._dtype):\n if not copy:\n return self\n dtype = pandas_dtype(dtype)\n if isinstance(dtype, type(self.dtype)):\n return type(self)(self._data, copy=copy, context=dtype.context)\n\n return super().astype(dtype, copy=copy)\n\n def __setitem__(self, key, value) -> None:\n if is_list_like(value):\n if is_scalar(key):\n raise ValueError("setting an array element with a sequence.")\n value = [decimal.Decimal(v) for v in value]\n else:\n value = decimal.Decimal(value)\n\n key = check_array_indexer(self, key)\n self._data[key] = value\n\n def __len__(self) -> 
int:\n return len(self._data)\n\n def __contains__(self, item) -> bool | np.bool_:\n if not isinstance(item, decimal.Decimal):\n return False\n elif item.is_nan():\n return self.isna().any()\n else:\n return super().__contains__(item)\n\n @property\n def nbytes(self) -> int:\n n = len(self)\n if n:\n return n * sys.getsizeof(self[0])\n return 0\n\n def isna(self):\n return np.array([x.is_nan() for x in self._data], dtype=bool)\n\n @property\n def _na_value(self):\n return decimal.Decimal("NaN")\n\n def _formatter(self, boxed=False):\n if boxed:\n return "Decimal: {}".format\n return repr\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n return cls(np.concatenate([x._data for x in to_concat]))\n\n def _reduce(\n self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs\n ):\n if skipna and self.isna().any():\n # If we don't have any NAs, we can ignore skipna\n other = self[~self.isna()]\n result = other._reduce(name, **kwargs)\n elif name == "sum" and len(self) == 0:\n # GH#29630 avoid returning int 0 or np.bool_(False) on old numpy\n result = decimal.Decimal(0)\n else:\n try:\n op = getattr(self.data, name)\n except AttributeError as err:\n raise NotImplementedError(\n f"decimal does not support the {name} operation"\n ) from err\n result = op(axis=0)\n\n if keepdims:\n return type(self)([result])\n else:\n return result\n\n def _cmp_method(self, other, op):\n # For use with OpsMixin\n def convert_values(param):\n if isinstance(param, ExtensionArray) or is_list_like(param):\n ovalues = param\n else:\n # Assume it's an object\n ovalues = [param] * len(self)\n return ovalues\n\n lvalues = self\n rvalues = convert_values(other)\n\n # If the operator is not defined for the underlying objects,\n # a TypeError should be raised\n res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]\n\n return np.asarray(res, dtype=bool)\n\n def value_counts(self, dropna: bool = True):\n return value_counts(self.to_numpy(), dropna=dropna)\n\n # We override fillna 
here to simulate a 3rd party EA that has done so. This\n # lets us test the deprecation telling authors to implement _pad_or_backfill\n # Simulate a 3rd-party EA that has not yet updated to include a "copy"\n # keyword in its fillna method.\n # error: Signature of "fillna" incompatible with supertype "ExtensionArray"\n def fillna( # type: ignore[override]\n self,\n value=None,\n method=None,\n limit: int | None = None,\n ):\n return super().fillna(value=value, method=method, limit=limit, copy=True)\n\n\ndef to_decimal(values, context=None):\n return DecimalArray([decimal.Decimal(x) for x in values], context=context)\n\n\ndef make_data():\n return [decimal.Decimal(val) for val in np.random.default_rng(2).random(100)]\n\n\nDecimalArray._add_arithmetic_ops()\n
.venv\Lib\site-packages\pandas\tests\extension\decimal\array.py
array.py
Python
9,694
0.95
0.234727
0.087302
react-lib
724
2024-07-05T04:29:37.949433
BSD-3-Clause
true
7a886b8464e43f8e0281afb50c67f1ee
from __future__ import annotations\n\nimport decimal\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_gt2\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension import base\nfrom pandas.tests.extension.decimal.array import (\n DecimalArray,\n DecimalDtype,\n make_data,\n to_decimal,\n)\n\n\n@pytest.fixture\ndef dtype():\n return DecimalDtype()\n\n\n@pytest.fixture\ndef data():\n return DecimalArray(make_data())\n\n\n@pytest.fixture\ndef data_for_twos():\n return DecimalArray([decimal.Decimal(2) for _ in range(100)])\n\n\n@pytest.fixture\ndef data_missing():\n return DecimalArray([decimal.Decimal("NaN"), decimal.Decimal(1)])\n\n\n@pytest.fixture\ndef data_for_sorting():\n return DecimalArray(\n [decimal.Decimal("1"), decimal.Decimal("2"), decimal.Decimal("0")]\n )\n\n\n@pytest.fixture\ndef data_missing_for_sorting():\n return DecimalArray(\n [decimal.Decimal("1"), decimal.Decimal("NaN"), decimal.Decimal("0")]\n )\n\n\n@pytest.fixture\ndef na_cmp():\n return lambda x, y: x.is_nan() and y.is_nan()\n\n\n@pytest.fixture\ndef data_for_grouping():\n b = decimal.Decimal("1.0")\n a = decimal.Decimal("0.0")\n c = decimal.Decimal("2.0")\n na = decimal.Decimal("NaN")\n return DecimalArray([b, b, na, na, a, a, b, c])\n\n\nclass TestDecimalArray(base.ExtensionTests):\n def _get_expected_exception(\n self, op_name: str, obj, other\n ) -> type[Exception] | tuple[type[Exception], ...] 
| None:\n return None\n\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n return True\n\n def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):\n if op_name == "count":\n return super().check_reduce(ser, op_name, skipna)\n else:\n result = getattr(ser, op_name)(skipna=skipna)\n expected = getattr(np.asarray(ser), op_name)()\n tm.assert_almost_equal(result, expected)\n\n def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):\n if all_numeric_reductions in ["kurt", "skew", "sem", "median"]:\n mark = pytest.mark.xfail(raises=NotImplementedError)\n request.applymarker(mark)\n super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)\n\n def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):\n op_name = all_numeric_reductions\n if op_name in ["skew", "median"]:\n mark = pytest.mark.xfail(raises=NotImplementedError)\n request.applymarker(mark)\n\n return super().test_reduce_frame(data, all_numeric_reductions, skipna)\n\n def test_compare_scalar(self, data, comparison_op):\n ser = pd.Series(data)\n self._compare_other(ser, data, comparison_op, 0.5)\n\n def test_compare_array(self, data, comparison_op):\n ser = pd.Series(data)\n\n alter = np.random.default_rng(2).choice([-1, 0, 1], len(data))\n # Randomly double, halve or keep same value\n other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]\n self._compare_other(ser, data, comparison_op, other)\n\n def test_arith_series_with_array(self, data, all_arithmetic_operators):\n op_name = all_arithmetic_operators\n ser = pd.Series(data)\n\n context = decimal.getcontext()\n divbyzerotrap = context.traps[decimal.DivisionByZero]\n invalidoptrap = context.traps[decimal.InvalidOperation]\n context.traps[decimal.DivisionByZero] = 0\n context.traps[decimal.InvalidOperation] = 0\n\n # Decimal supports ops with int, but not float\n other = pd.Series([int(d * 100) for d in data])\n self.check_opname(ser, op_name, 
other)\n\n if "mod" not in op_name:\n self.check_opname(ser, op_name, ser * 2)\n\n self.check_opname(ser, op_name, 0)\n self.check_opname(ser, op_name, 5)\n context.traps[decimal.DivisionByZero] = divbyzerotrap\n context.traps[decimal.InvalidOperation] = invalidoptrap\n\n def test_fillna_frame(self, data_missing):\n msg = "ExtensionArray.fillna added a 'copy' keyword"\n with tm.assert_produces_warning(\n DeprecationWarning, match=msg, check_stacklevel=False\n ):\n super().test_fillna_frame(data_missing)\n\n def test_fillna_limit_pad(self, data_missing):\n msg = "ExtensionArray.fillna 'method' keyword is deprecated"\n with tm.assert_produces_warning(\n DeprecationWarning,\n match=msg,\n check_stacklevel=False,\n raise_on_extra_warnings=False,\n ):\n super().test_fillna_limit_pad(data_missing)\n\n msg = "The 'method' keyword in DecimalArray.fillna is deprecated"\n with tm.assert_produces_warning(\n FutureWarning,\n match=msg,\n check_stacklevel=False,\n raise_on_extra_warnings=False,\n ):\n super().test_fillna_limit_pad(data_missing)\n\n @pytest.mark.parametrize(\n "limit_area, input_ilocs, expected_ilocs",\n [\n ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),\n ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),\n ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),\n ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),\n ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),\n ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),\n ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),\n ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),\n ],\n )\n def test_ffill_limit_area(\n self, data_missing, limit_area, input_ilocs, expected_ilocs\n ):\n # GH#56616\n msg = "ExtensionArray.fillna 'method' keyword is deprecated"\n with tm.assert_produces_warning(\n DeprecationWarning,\n match=msg,\n check_stacklevel=False,\n raise_on_extra_warnings=False,\n ):\n msg = "DecimalArray does not implement limit_area"\n with pytest.raises(NotImplementedError, match=msg):\n super().test_ffill_limit_area(\n data_missing, limit_area, 
input_ilocs, expected_ilocs\n )\n\n def test_fillna_limit_backfill(self, data_missing):\n msg = "Series.fillna with 'method' is deprecated"\n with tm.assert_produces_warning(\n FutureWarning,\n match=msg,\n check_stacklevel=False,\n raise_on_extra_warnings=False,\n ):\n super().test_fillna_limit_backfill(data_missing)\n\n msg = "ExtensionArray.fillna 'method' keyword is deprecated"\n with tm.assert_produces_warning(\n DeprecationWarning,\n match=msg,\n check_stacklevel=False,\n raise_on_extra_warnings=False,\n ):\n super().test_fillna_limit_backfill(data_missing)\n\n msg = "The 'method' keyword in DecimalArray.fillna is deprecated"\n with tm.assert_produces_warning(\n FutureWarning,\n match=msg,\n check_stacklevel=False,\n raise_on_extra_warnings=False,\n ):\n super().test_fillna_limit_backfill(data_missing)\n\n def test_fillna_no_op_returns_copy(self, data):\n msg = "|".join(\n [\n "ExtensionArray.fillna 'method' keyword is deprecated",\n "The 'method' keyword in DecimalArray.fillna is deprecated",\n ]\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False\n ):\n super().test_fillna_no_op_returns_copy(data)\n\n def test_fillna_series(self, data_missing):\n msg = "ExtensionArray.fillna added a 'copy' keyword"\n with tm.assert_produces_warning(\n DeprecationWarning, match=msg, check_stacklevel=False\n ):\n super().test_fillna_series(data_missing)\n\n def test_fillna_series_method(self, data_missing, fillna_method):\n msg = "|".join(\n [\n "ExtensionArray.fillna 'method' keyword is deprecated",\n "The 'method' keyword in DecimalArray.fillna is deprecated",\n ]\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False\n ):\n super().test_fillna_series_method(data_missing, fillna_method)\n\n def test_fillna_copy_frame(self, data_missing, using_copy_on_write):\n warn = DeprecationWarning if not using_copy_on_write else None\n msg = "ExtensionArray.fillna added 
a 'copy' keyword"\n with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):\n super().test_fillna_copy_frame(data_missing)\n\n def test_fillna_copy_series(self, data_missing, using_copy_on_write):\n warn = DeprecationWarning if not using_copy_on_write else None\n msg = "ExtensionArray.fillna added a 'copy' keyword"\n with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):\n super().test_fillna_copy_series(data_missing)\n\n @pytest.mark.parametrize("dropna", [True, False])\n def test_value_counts(self, all_data, dropna, request):\n all_data = all_data[:10]\n if dropna:\n other = np.array(all_data[~all_data.isna()])\n else:\n other = all_data\n\n vcs = pd.Series(all_data).value_counts(dropna=dropna)\n vcs_ex = pd.Series(other).value_counts(dropna=dropna)\n\n with decimal.localcontext() as ctx:\n # avoid raising when comparing Decimal("NAN") < Decimal(2)\n ctx.traps[decimal.InvalidOperation] = False\n\n result = vcs.sort_index()\n expected = vcs_ex.sort_index()\n\n tm.assert_series_equal(result, expected)\n\n def test_series_repr(self, data):\n # Overriding this base test to explicitly test that\n # the custom _formatter is used\n ser = pd.Series(data)\n assert data.dtype.name in repr(ser)\n assert "Decimal: " in repr(ser)\n\n @pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior")\n @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])\n def test_unary_ufunc_dunder_equivalence(self, data, ufunc):\n super().test_unary_ufunc_dunder_equivalence(data, ufunc)\n\n def test_array_interface_copy(self, data):\n result_copy1 = np.array(data, copy=True)\n result_copy2 = np.array(data, copy=True)\n assert not np.may_share_memory(result_copy1, result_copy2)\n if not np_version_gt2:\n # copy=False semantics are only supported in NumPy>=2.\n return\n\n try:\n result_nocopy1 = np.array(data, copy=False)\n except ValueError:\n # An error is always acceptable for `copy=False`\n return\n\n result_nocopy2 = np.array(data, 
copy=False)\n # If copy=False was given and did not raise, these must share the same data\n assert np.may_share_memory(result_nocopy1, result_nocopy2)\n\n\ndef test_take_na_value_other_decimal():\n arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])\n result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))\n expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")])\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_series_constructor_coerce_data_to_extension_dtype():\n dtype = DecimalDtype()\n ser = pd.Series([0, 1, 2], dtype=dtype)\n\n arr = DecimalArray(\n [decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)],\n dtype=dtype,\n )\n exp = pd.Series(arr)\n tm.assert_series_equal(ser, exp)\n\n\ndef test_series_constructor_with_dtype():\n arr = DecimalArray([decimal.Decimal("10.0")])\n result = pd.Series(arr, dtype=DecimalDtype())\n expected = pd.Series(arr)\n tm.assert_series_equal(result, expected)\n\n result = pd.Series(arr, dtype="int64")\n expected = pd.Series([10])\n tm.assert_series_equal(result, expected)\n\n\ndef test_dataframe_constructor_with_dtype():\n arr = DecimalArray([decimal.Decimal("10.0")])\n\n result = pd.DataFrame({"A": arr}, dtype=DecimalDtype())\n expected = pd.DataFrame({"A": arr})\n tm.assert_frame_equal(result, expected)\n\n arr = DecimalArray([decimal.Decimal("10.0")])\n result = pd.DataFrame({"A": arr}, dtype="int64")\n expected = pd.DataFrame({"A": [10]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("frame", [True, False])\ndef test_astype_dispatches(frame):\n # This is a dtype-specific test that ensures Series[decimal].astype\n # gets all the way through to ExtensionArray.astype\n # Designing a reliable smoke test that works for arbitrary data types\n # is difficult.\n data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a")\n ctx = decimal.Context()\n ctx.prec = 5\n\n if frame:\n data = data.to_frame()\n\n result = 
data.astype(DecimalDtype(ctx))\n\n if frame:\n result = result["a"]\n\n assert result.dtype.context.prec == ctx.prec\n\n\nclass DecimalArrayWithoutFromSequence(DecimalArray):\n """Helper class for testing error handling in _from_sequence."""\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype=None, copy=False):\n raise KeyError("For the test")\n\n\nclass DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):\n @classmethod\n def _create_arithmetic_method(cls, op):\n return cls._create_method(op, coerce_to_dtype=False)\n\n\nDecimalArrayWithoutCoercion._add_arithmetic_ops()\n\n\ndef test_combine_from_sequence_raises(monkeypatch):\n # https://github.com/pandas-dev/pandas/issues/22850\n cls = DecimalArrayWithoutFromSequence\n\n @classmethod\n def construct_array_type(cls):\n return DecimalArrayWithoutFromSequence\n\n monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type)\n\n arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")])\n ser = pd.Series(arr)\n result = ser.combine(ser, operator.add)\n\n # note: object dtype\n expected = pd.Series(\n [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "class_", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion]\n)\ndef test_scalar_ops_from_sequence_raises(class_):\n # op(EA, EA) should return an EA, or an ndarray if it's not possible\n # to return an EA with the return values.\n arr = class_([decimal.Decimal("1.0"), decimal.Decimal("2.0")])\n result = arr + arr\n expected = np.array(\n [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"\n )\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "reverse, expected_div, expected_mod",\n [(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])],\n)\ndef test_divmod_array(reverse, expected_div, expected_mod):\n # https://github.com/pandas-dev/pandas/issues/22930\n arr = 
to_decimal([1, 2, 3, 4])\n if reverse:\n div, mod = divmod(2, arr)\n else:\n div, mod = divmod(arr, 2)\n expected_div = to_decimal(expected_div)\n expected_mod = to_decimal(expected_mod)\n\n tm.assert_extension_array_equal(div, expected_div)\n tm.assert_extension_array_equal(mod, expected_mod)\n\n\ndef test_ufunc_fallback(data):\n a = data[:5]\n s = pd.Series(a, index=range(3, 8))\n result = np.abs(s)\n expected = pd.Series(np.abs(a), index=range(3, 8))\n tm.assert_series_equal(result, expected)\n\n\ndef test_array_ufunc():\n a = to_decimal([1, 2, 3])\n result = np.exp(a)\n expected = to_decimal(np.exp(a._data))\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_array_ufunc_series():\n a = to_decimal([1, 2, 3])\n s = pd.Series(a)\n result = np.exp(s)\n expected = pd.Series(to_decimal(np.exp(a._data)))\n tm.assert_series_equal(result, expected)\n\n\ndef test_array_ufunc_series_scalar_other():\n # check _HANDLED_TYPES\n a = to_decimal([1, 2, 3])\n s = pd.Series(a)\n result = np.add(s, decimal.Decimal(1))\n expected = pd.Series(np.add(a, decimal.Decimal(1)))\n tm.assert_series_equal(result, expected)\n\n\ndef test_array_ufunc_series_defer():\n a = to_decimal([1, 2, 3])\n s = pd.Series(a)\n\n expected = pd.Series(to_decimal([2, 4, 6]))\n r1 = np.add(s, a)\n r2 = np.add(a, s)\n\n tm.assert_series_equal(r1, expected)\n tm.assert_series_equal(r2, expected)\n\n\ndef test_groupby_agg():\n # Ensure that the result of agg is inferred to be decimal dtype\n # https://github.com/pandas-dev/pandas/issues/29141\n\n data = make_data()[:5]\n df = pd.DataFrame(\n {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}\n )\n\n # single key, selected column\n expected = pd.Series(to_decimal([data[0], data[3]]))\n result = df.groupby("id1")["decimals"].agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, check_names=False)\n result = df["decimals"].groupby(df["id1"]).agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, 
expected, check_names=False)\n\n # multiple keys, selected column\n expected = pd.Series(\n to_decimal([data[0], data[1], data[3]]),\n index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]),\n )\n result = df.groupby(["id1", "id2"])["decimals"].agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, check_names=False)\n result = df["decimals"].groupby([df["id1"], df["id2"]]).agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, check_names=False)\n\n # multiple columns\n expected = pd.DataFrame({"id2": [0, 1], "decimals": to_decimal([data[0], data[3]])})\n result = df.groupby("id1").agg(lambda x: x.iloc[0])\n tm.assert_frame_equal(result, expected, check_names=False)\n\n\ndef test_groupby_agg_ea_method(monkeypatch):\n # Ensure that the result of agg is inferred to be decimal dtype\n # https://github.com/pandas-dev/pandas/issues/29141\n\n def DecimalArray__my_sum(self):\n return np.sum(np.array(self))\n\n monkeypatch.setattr(DecimalArray, "my_sum", DecimalArray__my_sum, raising=False)\n\n data = make_data()[:5]\n df = pd.DataFrame({"id": [0, 0, 0, 1, 1], "decimals": DecimalArray(data)})\n expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]]))\n\n result = df.groupby("id")["decimals"].agg(lambda x: x.values.my_sum())\n tm.assert_series_equal(result, expected, check_names=False)\n s = pd.Series(DecimalArray(data))\n grouper = np.array([0, 0, 0, 1, 1], dtype=np.int64)\n result = s.groupby(grouper).agg(lambda x: x.values.my_sum())\n tm.assert_series_equal(result, expected, check_names=False)\n\n\ndef test_indexing_no_materialize(monkeypatch):\n # See https://github.com/pandas-dev/pandas/issues/29708\n # Ensure that indexing operations do not materialize (convert to a numpy\n # array) the ExtensionArray unnecessary\n\n def DecimalArray__array__(self, dtype=None):\n raise Exception("tried to convert a DecimalArray to a numpy array")\n\n monkeypatch.setattr(DecimalArray, "__array__", DecimalArray__array__, 
raising=False)\n\n data = make_data()\n s = pd.Series(DecimalArray(data))\n df = pd.DataFrame({"a": s, "b": range(len(s))})\n\n # ensure the following operations do not raise an error\n s[s > 0.5]\n df[s > 0.5]\n s.at[0]\n df.at[0, "a"]\n\n\ndef test_to_numpy_keyword():\n # test the extra keyword\n values = [decimal.Decimal("1.1111"), decimal.Decimal("2.2222")]\n expected = np.array(\n [decimal.Decimal("1.11"), decimal.Decimal("2.22")], dtype="object"\n )\n a = pd.array(values, dtype="decimal")\n result = a.to_numpy(decimals=2)\n tm.assert_numpy_array_equal(result, expected)\n\n result = pd.Series(a).to_numpy(decimals=2)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_array_copy_on_write(using_copy_on_write):\n df = pd.DataFrame({"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype="object")\n df2 = df.astype(DecimalDtype())\n df.iloc[0, 0] = 0\n if using_copy_on_write:\n expected = pd.DataFrame(\n {"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype=DecimalDtype()\n )\n tm.assert_equal(df2.values, expected.values)\n
.venv\Lib\site-packages\pandas\tests\extension\decimal\test_decimal.py
test_decimal.py
Python
20,248
0.95
0.129472
0.067538
vue-tools
616
2024-01-03T23:10:44.765333
Apache-2.0
true
769a7050104618ef8e2215b7c8b6f224
from pandas.tests.extension.decimal.array import (\n DecimalArray,\n DecimalDtype,\n make_data,\n to_decimal,\n)\n\n__all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"]\n
.venv\Lib\site-packages\pandas\tests\extension\decimal\__init__.py
__init__.py
Python
191
0.85
0
0
python-kit
0
2024-10-31T23:45:04.344787
Apache-2.0
true
c1fd41d226f96c5bfdd9bc335e1f9de5
\n\n
.venv\Lib\site-packages\pandas\tests\extension\decimal\__pycache__\array.cpython-313.pyc
array.cpython-313.pyc
Other
15,497
0.8
0
0
node-utils
126
2023-10-26T05:17:41.373207
MIT
true
e3fb39dff7b437f5c47f2822fb48ed72
\n\n
.venv\Lib\site-packages\pandas\tests\extension\decimal\__pycache__\test_decimal.cpython-313.pyc
test_decimal.cpython-313.pyc
Other
33,796
0.95
0.004808
0.032178
awesome-app
593
2023-10-11T04:09:02.792135
MIT
true
37ff2e3042e6f05fae074dffca8447b0
\n\n
.venv\Lib\site-packages\pandas\tests\extension\decimal\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
399
0.7
0
0
python-kit
530
2024-08-24T19:42:44.613329
Apache-2.0
true
ab82b3bd5f453951447236a728c46865
"""\nTest extension array for storing nested data in a pandas container.\n\nThe JSONArray stores lists of dictionaries. The storage mechanism is a list,\nnot an ndarray.\n\nNote\n----\nWe currently store lists of UserDicts. Pandas has a few places\ninternally that specifically check for dicts, and does non-scalar things\nin that case. We *want* the dictionaries to be treated as scalars, so we\nhack around pandas by using UserDicts.\n"""\nfrom __future__ import annotations\n\nfrom collections import (\n UserDict,\n abc,\n)\nimport itertools\nimport numbers\nimport string\nimport sys\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import construct_1d_object_array_from_listlike\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_list_like,\n pandas_dtype,\n)\n\nimport pandas as pd\nfrom pandas.api.extensions import (\n ExtensionArray,\n ExtensionDtype,\n)\nfrom pandas.core.indexers import unpack_tuple_and_ellipses\n\nif TYPE_CHECKING:\n from collections.abc import Mapping\n\n from pandas._typing import type_t\n\n\nclass JSONDtype(ExtensionDtype):\n type = abc.Mapping\n name = "json"\n na_value: Mapping[str, Any] = UserDict()\n\n @classmethod\n def construct_array_type(cls) -> type_t[JSONArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n return JSONArray\n\n\nclass JSONArray(ExtensionArray):\n dtype = JSONDtype()\n __array_priority__ = 1000\n\n def __init__(self, values, dtype=None, copy=False) -> None:\n for val in values:\n if not isinstance(val, self.dtype.type):\n raise TypeError("All values must be of type " + str(self.dtype.type))\n self.data = values\n\n # Some aliases for common attribute names to ensure pandas supports\n # these\n self._items = self._data = self.data\n # those aliases are currently not working due to assumptions\n # in internal code (GH-20735)\n # 
self._values = self.values = self.data\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype=None, copy=False):\n return cls(scalars)\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls([UserDict(x) for x in values if x != ()])\n\n def __getitem__(self, item):\n if isinstance(item, tuple):\n item = unpack_tuple_and_ellipses(item)\n\n if isinstance(item, numbers.Integral):\n return self.data[item]\n elif isinstance(item, slice) and item == slice(None):\n # Make sure we get a view\n return type(self)(self.data)\n elif isinstance(item, slice):\n # slice\n return type(self)(self.data[item])\n elif not is_list_like(item):\n # e.g. "foo" or 2.5\n # exception message copied from numpy\n raise IndexError(\n r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "\n r"(`None`) and integer or boolean arrays are valid indices"\n )\n else:\n item = pd.api.indexers.check_array_indexer(self, item)\n if is_bool_dtype(item.dtype):\n return type(self)._from_sequence(\n [x for x, m in zip(self, item) if m], dtype=self.dtype\n )\n # integer\n return type(self)([self.data[i] for i in item])\n\n def __setitem__(self, key, value) -> None:\n if isinstance(key, numbers.Integral):\n self.data[key] = value\n else:\n if not isinstance(value, (type(self), abc.Sequence)):\n # broadcast value\n value = itertools.cycle([value])\n\n if isinstance(key, np.ndarray) and key.dtype == "bool":\n # masking\n for i, (k, v) in enumerate(zip(key, value)):\n if k:\n assert isinstance(v, self.dtype.type)\n self.data[i] = v\n else:\n for k, v in zip(key, value):\n assert isinstance(v, self.dtype.type)\n self.data[k] = v\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __eq__(self, other):\n return NotImplemented\n\n def __ne__(self, other):\n return NotImplemented\n\n def __array__(self, dtype=None, copy=None):\n if copy is False:\n warnings.warn(\n "Starting with NumPy 2.0, the behavior of the 'copy' keyword has "\n "changed and passing 'copy=False' raises 
an error when returning "\n "a zero-copy NumPy array is not possible. pandas will follow "\n "this behavior starting with pandas 3.0.\nThis conversion to "\n "NumPy requires a copy, but 'copy=False' was passed. Consider "\n "using 'np.asarray(..)' instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if dtype is None:\n dtype = object\n if dtype == object:\n # on py38 builds it looks like numpy is inferring to a non-1D array\n return construct_1d_object_array_from_listlike(list(self))\n if copy is None:\n # Note: branch avoids `copy=None` for NumPy 1.x support\n return np.asarray(self.data, dtype=dtype)\n return np.asarray(self.data, dtype=dtype, copy=copy)\n\n @property\n def nbytes(self) -> int:\n return sys.getsizeof(self.data)\n\n def isna(self):\n return np.array([x == self.dtype.na_value for x in self.data], dtype=bool)\n\n def take(self, indexer, allow_fill=False, fill_value=None):\n # re-implement here, since NumPy has trouble setting\n # sized objects like UserDicts into scalar slots of\n # an ndarary.\n indexer = np.asarray(indexer)\n msg = (\n "Index is out of bounds or cannot do a "\n "non-empty take from an empty array."\n )\n\n if allow_fill:\n if fill_value is None:\n fill_value = self.dtype.na_value\n # bounds check\n if (indexer < -1).any():\n raise ValueError\n try:\n output = [\n self.data[loc] if loc != -1 else fill_value for loc in indexer\n ]\n except IndexError as err:\n raise IndexError(msg) from err\n else:\n try:\n output = [self.data[loc] for loc in indexer]\n except IndexError as err:\n raise IndexError(msg) from err\n\n return type(self)._from_sequence(output, dtype=self.dtype)\n\n def copy(self):\n return type(self)(self.data[:])\n\n def astype(self, dtype, copy=True):\n # NumPy has issues when all the dicts are the same length.\n # np.array([UserDict(...), UserDict(...)]) fails,\n # but np.array([{...}, {...}]) works, so cast.\n from pandas.core.arrays.string_ import StringDtype\n\n dtype = pandas_dtype(dtype)\n # 
needed to add this check for the Series constructor\n if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:\n if copy:\n return self.copy()\n return self\n elif isinstance(dtype, StringDtype):\n arr_cls = dtype.construct_array_type()\n return arr_cls._from_sequence(self, dtype=dtype, copy=False)\n elif not copy:\n return np.asarray([dict(x) for x in self], dtype=dtype)\n else:\n return np.array([dict(x) for x in self], dtype=dtype, copy=copy)\n\n def unique(self):\n # Parent method doesn't work since np.array will try to infer\n # a 2-dim object.\n return type(self)([dict(x) for x in {tuple(d.items()) for d in self.data}])\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n data = list(itertools.chain.from_iterable(x.data for x in to_concat))\n return cls(data)\n\n def _values_for_factorize(self):\n frozen = self._values_for_argsort()\n if len(frozen) == 0:\n # factorize_array expects 1-d array, this is a len-0 2-d array.\n frozen = frozen.ravel()\n return frozen, ()\n\n def _values_for_argsort(self):\n # Bypass NumPy's shape inference to get a (N,) array of tuples.\n frozen = [tuple(x.items()) for x in self]\n return construct_1d_object_array_from_listlike(frozen)\n\n def _pad_or_backfill(self, *, method, limit=None, copy=True):\n # GH#56616 - test EA method without limit_area argument\n return super()._pad_or_backfill(method=method, limit=limit, copy=copy)\n\n\ndef make_data():\n # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer\n rng = np.random.default_rng(2)\n return [\n UserDict(\n [\n (rng.choice(list(string.ascii_letters)), rng.integers(0, 100))\n for _ in range(rng.integers(0, 10))\n ]\n )\n for _ in range(100)\n ]\n
.venv\Lib\site-packages\pandas\tests\extension\json\array.py
array.py
Python
9,091
0.95
0.25641
0.121739
node-utils
887
2025-06-05T04:38:42.151378
BSD-3-Clause
true
b98849960e538398d0df56bab9e328aa
import collections\nimport operator\nimport sys\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension import base\nfrom pandas.tests.extension.json.array import (\n JSONArray,\n JSONDtype,\n make_data,\n)\n\n# We intentionally don't run base.BaseSetitemTests because pandas'\n# internals has trouble setting sequences of values into scalar positions.\nunhashable = pytest.mark.xfail(reason="Unhashable")\n\n\n@pytest.fixture\ndef dtype():\n return JSONDtype()\n\n\n@pytest.fixture\ndef data():\n """Length-100 PeriodArray for semantics test."""\n data = make_data()\n\n # Why the while loop? NumPy is unable to construct an ndarray from\n # equal-length ndarrays. Many of our operations involve coercing the\n # EA to an ndarray of objects. To avoid random test failures, we ensure\n # that our data is coercible to an ndarray. Several tests deal with only\n # the first two elements, so that's what we'll check.\n\n while len(data[0]) == len(data[1]):\n data = make_data()\n\n return JSONArray(data)\n\n\n@pytest.fixture\ndef data_missing():\n """Length 2 array with [NA, Valid]"""\n return JSONArray([{}, {"a": 10}])\n\n\n@pytest.fixture\ndef data_for_sorting():\n return JSONArray([{"b": 1}, {"c": 4}, {"a": 2, "c": 3}])\n\n\n@pytest.fixture\ndef data_missing_for_sorting():\n return JSONArray([{"b": 1}, {}, {"a": 4}])\n\n\n@pytest.fixture\ndef na_cmp():\n return operator.eq\n\n\n@pytest.fixture\ndef data_for_grouping():\n return JSONArray(\n [\n {"b": 1},\n {"b": 1},\n {},\n {},\n {"a": 0, "c": 2},\n {"a": 0, "c": 2},\n {"b": 1},\n {"c": 2},\n ]\n )\n\n\nclass TestJSONArray(base.ExtensionTests):\n @pytest.mark.xfail(\n reason="comparison method not implemented for JSONArray (GH-37867)"\n )\n def test_contains(self, data):\n # GH-37867\n super().test_contains(data)\n\n @pytest.mark.xfail(reason="not implemented constructor from dtype")\n def test_from_dtype(self, data):\n # construct from our dtype & string dtype\n 
super().test_from_dtype(data)\n\n @pytest.mark.xfail(reason="RecursionError, GH-33900")\n def test_series_constructor_no_data_with_index(self, dtype, na_value):\n # RecursionError: maximum recursion depth exceeded in comparison\n rec_limit = sys.getrecursionlimit()\n try:\n # Limit to avoid stack overflow on Windows CI\n sys.setrecursionlimit(100)\n super().test_series_constructor_no_data_with_index(dtype, na_value)\n finally:\n sys.setrecursionlimit(rec_limit)\n\n @pytest.mark.xfail(reason="RecursionError, GH-33900")\n def test_series_constructor_scalar_na_with_index(self, dtype, na_value):\n # RecursionError: maximum recursion depth exceeded in comparison\n rec_limit = sys.getrecursionlimit()\n try:\n # Limit to avoid stack overflow on Windows CI\n sys.setrecursionlimit(100)\n super().test_series_constructor_scalar_na_with_index(dtype, na_value)\n finally:\n sys.setrecursionlimit(rec_limit)\n\n @pytest.mark.xfail(reason="collection as scalar, GH-33901")\n def test_series_constructor_scalar_with_index(self, data, dtype):\n # TypeError: All values must be of type <class 'collections.abc.Mapping'>\n rec_limit = sys.getrecursionlimit()\n try:\n # Limit to avoid stack overflow on Windows CI\n sys.setrecursionlimit(100)\n super().test_series_constructor_scalar_with_index(data, dtype)\n finally:\n sys.setrecursionlimit(rec_limit)\n\n @pytest.mark.xfail(reason="Different definitions of NA")\n def test_stack(self):\n """\n The test does .astype(object).stack(future_stack=True). 
If we happen to have\n any missing values in `data`, then we'll end up with different\n rows since we consider `{}` NA, but `.astype(object)` doesn't.\n """\n super().test_stack()\n\n @pytest.mark.xfail(reason="dict for NA")\n def test_unstack(self, data, index):\n # The base test has NaN for the expected NA value.\n # this matches otherwise\n return super().test_unstack(data, index)\n\n @pytest.mark.xfail(reason="Setting a dict as a scalar")\n def test_fillna_series(self):\n """We treat dictionaries as a mapping in fillna, not a scalar."""\n super().test_fillna_series()\n\n @pytest.mark.xfail(reason="Setting a dict as a scalar")\n def test_fillna_frame(self):\n """We treat dictionaries as a mapping in fillna, not a scalar."""\n super().test_fillna_frame()\n\n @pytest.mark.parametrize(\n "limit_area, input_ilocs, expected_ilocs",\n [\n ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),\n ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),\n ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),\n ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),\n ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),\n ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),\n ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),\n ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),\n ],\n )\n def test_ffill_limit_area(\n self, data_missing, limit_area, input_ilocs, expected_ilocs\n ):\n # GH#56616\n msg = "JSONArray does not implement limit_area"\n with pytest.raises(NotImplementedError, match=msg):\n super().test_ffill_limit_area(\n data_missing, limit_area, input_ilocs, expected_ilocs\n )\n\n @unhashable\n def test_value_counts(self, all_data, dropna):\n super().test_value_counts(all_data, dropna)\n\n @unhashable\n def test_value_counts_with_normalize(self, data):\n super().test_value_counts_with_normalize(data)\n\n @unhashable\n def test_sort_values_frame(self):\n # TODO (EA.factorize): see if _values_for_factorize allows this.\n super().test_sort_values_frame()\n\n @pytest.mark.parametrize("ascending", [True, False])\n def 
test_sort_values(self, data_for_sorting, ascending, sort_by_key):\n super().test_sort_values(data_for_sorting, ascending, sort_by_key)\n\n @pytest.mark.parametrize("ascending", [True, False])\n def test_sort_values_missing(\n self, data_missing_for_sorting, ascending, sort_by_key\n ):\n super().test_sort_values_missing(\n data_missing_for_sorting, ascending, sort_by_key\n )\n\n @pytest.mark.xfail(reason="combine for JSONArray not supported")\n def test_combine_le(self, data_repeated):\n super().test_combine_le(data_repeated)\n\n @pytest.mark.xfail(\n reason="combine for JSONArray not supported - "\n "may pass depending on random data",\n strict=False,\n raises=AssertionError,\n )\n def test_combine_first(self, data):\n super().test_combine_first(data)\n\n @pytest.mark.xfail(reason="broadcasting error")\n def test_where_series(self, data, na_value):\n # Fails with\n # *** ValueError: operands could not be broadcast together\n # with shapes (4,) (4,) (0,)\n super().test_where_series(data, na_value)\n\n @pytest.mark.xfail(reason="Can't compare dicts.")\n def test_searchsorted(self, data_for_sorting):\n super().test_searchsorted(data_for_sorting)\n\n @pytest.mark.xfail(reason="Can't compare dicts.")\n def test_equals(self, data, na_value, as_series):\n super().test_equals(data, na_value, as_series)\n\n @pytest.mark.skip("fill-value is interpreted as a dict of values")\n def test_fillna_copy_frame(self, data_missing):\n super().test_fillna_copy_frame(data_missing)\n\n def test_equals_same_data_different_object(\n self, data, using_copy_on_write, request\n ):\n if using_copy_on_write:\n mark = pytest.mark.xfail(reason="Fails with CoW")\n request.applymarker(mark)\n super().test_equals_same_data_different_object(data)\n\n @pytest.mark.xfail(reason="failing on np.array(self, dtype=str)")\n def test_astype_str(self):\n """This currently fails in NumPy on np.array(self, dtype=str) with\n\n *** ValueError: setting an array element with a sequence\n """\n 
super().test_astype_str()\n\n @unhashable\n def test_groupby_extension_transform(self):\n """\n This currently fails in Series.name.setter, since the\n name must be hashable, but the value is a dictionary.\n I think this is what we want, i.e. `.name` should be the original\n values, and not the values for factorization.\n """\n super().test_groupby_extension_transform()\n\n @unhashable\n def test_groupby_extension_apply(self):\n """\n This fails in Index._do_unique_check with\n\n > hash(val)\n E TypeError: unhashable type: 'UserDict' with\n\n I suspect that once we support Index[ExtensionArray],\n we'll be able to dispatch unique.\n """\n super().test_groupby_extension_apply()\n\n @unhashable\n def test_groupby_extension_agg(self):\n """\n This fails when we get to tm.assert_series_equal when left.index\n contains dictionaries, which are not hashable.\n """\n super().test_groupby_extension_agg()\n\n @unhashable\n def test_groupby_extension_no_sort(self):\n """\n This fails when we get to tm.assert_series_equal when left.index\n contains dictionaries, which are not hashable.\n """\n super().test_groupby_extension_no_sort()\n\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):\n if len(data[0]) != 1:\n mark = pytest.mark.xfail(reason="raises in coercing to Series")\n request.applymarker(mark)\n super().test_arith_frame_with_scalar(data, all_arithmetic_operators)\n\n def test_compare_array(self, data, comparison_op, request):\n if comparison_op.__name__ in ["eq", "ne"]:\n mark = pytest.mark.xfail(reason="Comparison methods not implemented")\n request.applymarker(mark)\n super().test_compare_array(data, comparison_op)\n\n @pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")\n def test_setitem_loc_scalar_mixed(self, data):\n super().test_setitem_loc_scalar_mixed(data)\n\n @pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")\n def test_setitem_loc_scalar_multiple_homogoneous(self, data):\n 
super().test_setitem_loc_scalar_multiple_homogoneous(data)\n\n @pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")\n def test_setitem_iloc_scalar_mixed(self, data):\n super().test_setitem_iloc_scalar_mixed(data)\n\n @pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")\n def test_setitem_iloc_scalar_multiple_homogoneous(self, data):\n super().test_setitem_iloc_scalar_multiple_homogoneous(data)\n\n @pytest.mark.parametrize(\n "mask",\n [\n np.array([True, True, True, False, False]),\n pd.array([True, True, True, False, False], dtype="boolean"),\n pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),\n ],\n ids=["numpy-array", "boolean-array", "boolean-array-na"],\n )\n def test_setitem_mask(self, data, mask, box_in_series, request):\n if box_in_series:\n mark = pytest.mark.xfail(\n reason="cannot set using a list-like indexer with a different length"\n )\n request.applymarker(mark)\n elif not isinstance(mask, np.ndarray):\n mark = pytest.mark.xfail(reason="Issues unwanted DeprecationWarning")\n request.applymarker(mark)\n super().test_setitem_mask(data, mask, box_in_series)\n\n def test_setitem_mask_raises(self, data, box_in_series, request):\n if not box_in_series:\n mark = pytest.mark.xfail(reason="Fails to raise")\n request.applymarker(mark)\n\n super().test_setitem_mask_raises(data, box_in_series)\n\n @pytest.mark.xfail(\n reason="cannot set using a list-like indexer with a different length"\n )\n def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):\n super().test_setitem_mask_boolean_array_with_na(data, box_in_series)\n\n @pytest.mark.parametrize(\n "idx",\n [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],\n ids=["list", "integer-array", "numpy-array"],\n )\n def test_setitem_integer_array(self, data, idx, box_in_series, request):\n if box_in_series:\n mark = pytest.mark.xfail(\n reason="cannot set using a list-like indexer with a different length"\n )\n 
request.applymarker(mark)\n super().test_setitem_integer_array(data, idx, box_in_series)\n\n @pytest.mark.xfail(reason="list indices must be integers or slices, not NAType")\n @pytest.mark.parametrize(\n "idx, box_in_series",\n [\n ([0, 1, 2, pd.NA], False),\n pytest.param(\n [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")\n ),\n (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),\n (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),\n ],\n ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],\n )\n def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):\n super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)\n\n @pytest.mark.xfail(reason="Fails to raise")\n def test_setitem_scalar_key_sequence_raise(self, data):\n super().test_setitem_scalar_key_sequence_raise(data)\n\n def test_setitem_with_expansion_dataframe_column(self, data, full_indexer, request):\n if "full_slice" in request.node.name:\n mark = pytest.mark.xfail(reason="slice is not iterable")\n request.applymarker(mark)\n super().test_setitem_with_expansion_dataframe_column(data, full_indexer)\n\n @pytest.mark.xfail(reason="slice is not iterable")\n def test_setitem_frame_2d_values(self, data):\n super().test_setitem_frame_2d_values(data)\n\n @pytest.mark.xfail(\n reason="cannot set using a list-like indexer with a different length"\n )\n @pytest.mark.parametrize("setter", ["loc", None])\n def test_setitem_mask_broadcast(self, data, setter):\n super().test_setitem_mask_broadcast(data, setter)\n\n @pytest.mark.xfail(\n reason="cannot set using a slice indexer with a different length"\n )\n def test_setitem_slice(self, data, box_in_series):\n super().test_setitem_slice(data, box_in_series)\n\n @pytest.mark.xfail(reason="slice object is not iterable")\n def test_setitem_loc_iloc_slice(self, data):\n super().test_setitem_loc_iloc_slice(data)\n\n @pytest.mark.xfail(reason="slice object is not iterable")\n def 
test_setitem_slice_mismatch_length_raises(self, data):\n super().test_setitem_slice_mismatch_length_raises(data)\n\n @pytest.mark.xfail(reason="slice object is not iterable")\n def test_setitem_slice_array(self, data):\n super().test_setitem_slice_array(data)\n\n @pytest.mark.xfail(reason="Fail to raise")\n def test_setitem_invalid(self, data, invalid_scalar):\n super().test_setitem_invalid(data, invalid_scalar)\n\n @pytest.mark.xfail(reason="only integer scalar arrays can be converted")\n def test_setitem_2d_values(self, data):\n super().test_setitem_2d_values(data)\n\n @pytest.mark.xfail(reason="data type 'json' not understood")\n @pytest.mark.parametrize("engine", ["c", "python"])\n def test_EA_types(self, engine, data, request):\n super().test_EA_types(engine, data, request)\n\n\ndef custom_assert_series_equal(left, right, *args, **kwargs):\n # NumPy doesn't handle an array of equal-length UserDicts.\n # The default assert_series_equal eventually does a\n # Series.values, which raises. 
We work around it by\n # converting the UserDicts to dicts.\n if left.dtype.name == "json":\n assert left.dtype == right.dtype\n left = pd.Series(\n JSONArray(left.values.astype(object)), index=left.index, name=left.name\n )\n right = pd.Series(\n JSONArray(right.values.astype(object)),\n index=right.index,\n name=right.name,\n )\n tm.assert_series_equal(left, right, *args, **kwargs)\n\n\ndef custom_assert_frame_equal(left, right, *args, **kwargs):\n obj_type = kwargs.get("obj", "DataFrame")\n tm.assert_index_equal(\n left.columns,\n right.columns,\n exact=kwargs.get("check_column_type", "equiv"),\n check_names=kwargs.get("check_names", True),\n check_exact=kwargs.get("check_exact", False),\n check_categorical=kwargs.get("check_categorical", True),\n obj=f"{obj_type}.columns",\n )\n\n jsons = (left.dtypes == "json").index\n\n for col in jsons:\n custom_assert_series_equal(left[col], right[col], *args, **kwargs)\n\n left = left.drop(columns=jsons)\n right = right.drop(columns=jsons)\n tm.assert_frame_equal(left, right, *args, **kwargs)\n\n\ndef test_custom_asserts():\n # This would always trigger the KeyError from trying to put\n # an array of equal-length UserDicts inside an ndarray.\n data = JSONArray(\n [\n collections.UserDict({"a": 1}),\n collections.UserDict({"b": 2}),\n collections.UserDict({"c": 3}),\n ]\n )\n a = pd.Series(data)\n custom_assert_series_equal(a, a)\n custom_assert_frame_equal(a.to_frame(), a.to_frame())\n\n b = pd.Series(data.take([0, 0, 1]))\n msg = r"Series are different"\n with pytest.raises(AssertionError, match=msg):\n custom_assert_series_equal(a, b)\n\n with pytest.raises(AssertionError, match=msg):\n custom_assert_frame_equal(a.to_frame(), b.to_frame())\n
.venv\Lib\site-packages\pandas\tests\extension\json\test_json.py
test_json.py
Python
17,951
0.95
0.169388
0.071605
awesome-app
459
2024-03-19T19:28:40.835965
MIT
true
8d2c16374c7b9b9500f89aaba2433a4b
from pandas.tests.extension.json.array import (\n JSONArray,\n JSONDtype,\n make_data,\n)\n\n__all__ = ["JSONArray", "JSONDtype", "make_data"]\n
.venv\Lib\site-packages\pandas\tests\extension\json\__init__.py
__init__.py
Python
146
0.85
0
0
react-lib
313
2025-06-08T03:19:22.130983
GPL-3.0
true
91d803a075fd3e9c0d24f178b00ad010
\n\n
.venv\Lib\site-packages\pandas\tests\extension\json\__pycache__\array.cpython-313.pyc
array.cpython-313.pyc
Other
13,429
0.95
0.017544
0
node-utils
737
2024-01-02T13:09:50.970828
Apache-2.0
true
8a3476d11425ea5977d1dba6d268a61e
\n\n
.venv\Lib\site-packages\pandas\tests\extension\json\__pycache__\test_json.cpython-313.pyc
test_json.cpython-313.pyc
Other
27,726
0.8
0.019481
0.003356
node-utils
728
2025-01-08T02:54:21.161525
BSD-3-Clause
true
01cc7c2fda6c35cd59a958cf9fe6a07f
\n\n
.venv\Lib\site-packages\pandas\tests\extension\json\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
343
0.7
0
0
awesome-app
999
2023-09-19T09:31:39.546254
BSD-3-Clause
true
05c8cb147cbab8116c1a482ba461137d
"""\nTest extension array for storing nested data in a pandas container.\n\nThe ListArray stores an ndarray of lists.\n"""\nfrom __future__ import annotations\n\nimport numbers\nimport string\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.core.dtypes.base import ExtensionDtype\n\nimport pandas as pd\nfrom pandas.api.types import (\n is_object_dtype,\n is_string_dtype,\n)\nfrom pandas.core.arrays import ExtensionArray\n\nif TYPE_CHECKING:\n from pandas._typing import type_t\n\n\nclass ListDtype(ExtensionDtype):\n type = list\n name = "list"\n na_value = np.nan\n\n @classmethod\n def construct_array_type(cls) -> type_t[ListArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n return ListArray\n\n\nclass ListArray(ExtensionArray):\n dtype = ListDtype()\n __array_priority__ = 1000\n\n def __init__(self, values, dtype=None, copy=False) -> None:\n if not isinstance(values, np.ndarray):\n raise TypeError("Need to pass a numpy array as values")\n for val in values:\n if not isinstance(val, self.dtype.type) and not pd.isna(val):\n raise TypeError("All values must be of type " + str(self.dtype.type))\n self.data = values\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype=None, copy=False):\n data = np.empty(len(scalars), dtype=object)\n data[:] = scalars\n return cls(data)\n\n def __getitem__(self, item):\n if isinstance(item, numbers.Integral):\n return self.data[item]\n else:\n # slice, list-like, mask\n return type(self)(self.data[item])\n\n def __len__(self) -> int:\n return len(self.data)\n\n def isna(self):\n return np.array(\n [not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool\n )\n\n def take(self, indexer, allow_fill=False, fill_value=None):\n # re-implement here, since NumPy has trouble setting\n # sized objects like UserDicts into scalar slots of\n # an ndarary.\n indexer = np.asarray(indexer)\n msg = (\n "Index is out of bounds or cannot do a "\n "non-empty 
take from an empty array."\n )\n\n if allow_fill:\n if fill_value is None:\n fill_value = self.dtype.na_value\n # bounds check\n if (indexer < -1).any():\n raise ValueError\n try:\n output = [\n self.data[loc] if loc != -1 else fill_value for loc in indexer\n ]\n except IndexError as err:\n raise IndexError(msg) from err\n else:\n try:\n output = [self.data[loc] for loc in indexer]\n except IndexError as err:\n raise IndexError(msg) from err\n\n return self._from_sequence(output)\n\n def copy(self):\n return type(self)(self.data[:])\n\n def astype(self, dtype, copy=True):\n if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:\n if copy:\n return self.copy()\n return self\n elif is_string_dtype(dtype) and not is_object_dtype(dtype):\n # numpy has problems with astype(str) for nested elements\n return np.array([str(x) for x in self.data], dtype=dtype)\n elif not copy:\n return np.asarray(self.data, dtype=dtype)\n else:\n return np.array(self.data, dtype=dtype, copy=copy)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n data = np.concatenate([x.data for x in to_concat])\n return cls(data)\n\n\ndef make_data():\n # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer\n rng = np.random.default_rng(2)\n data = np.empty(100, dtype=object)\n data[:] = [\n [rng.choice(list(string.ascii_letters)) for _ in range(rng.integers(0, 10))]\n for _ in range(100)\n ]\n return data\n
.venv\Lib\site-packages\pandas\tests\extension\list\array.py
array.py
Python
4,001
0.95
0.255474
0.0625
python-kit
164
2025-05-10T19:24:24.183486
BSD-3-Clause
true
bacb3b1e2c5b33385adb1a62e90db8fe
import pytest\n\nimport pandas as pd\nfrom pandas.tests.extension.list.array import (\n ListArray,\n ListDtype,\n make_data,\n)\n\n\n@pytest.fixture\ndef dtype():\n return ListDtype()\n\n\n@pytest.fixture\ndef data():\n """Length-100 ListArray for semantics test."""\n data = make_data()\n\n while len(data[0]) == len(data[1]):\n data = make_data()\n\n return ListArray(data)\n\n\ndef test_to_csv(data):\n # https://github.com/pandas-dev/pandas/issues/28840\n # array with list-likes fail when doing astype(str) on the numpy array\n # which was done in get_values_for_csv\n df = pd.DataFrame({"a": data})\n res = df.to_csv()\n assert str(data[0]) in res\n
.venv\Lib\site-packages\pandas\tests\extension\list\test_list.py
test_list.py
Python
671
0.95
0.151515
0.125
awesome-app
406
2023-11-16T12:22:48.544719
GPL-3.0
true
731af0cba85c5c61695faeaed4291d0e
from pandas.tests.extension.list.array import (\n ListArray,\n ListDtype,\n make_data,\n)\n\n__all__ = ["ListArray", "ListDtype", "make_data"]\n
.venv\Lib\site-packages\pandas\tests\extension\list\__init__.py
__init__.py
Python
146
0.85
0
0
node-utils
40
2024-02-01T08:47:54.868348
Apache-2.0
true
b6d0bf2688778d60f9b3fab93bb6c071
\n\n
.venv\Lib\site-packages\pandas\tests\extension\list\__pycache__\array.cpython-313.pyc
array.cpython-313.pyc
Other
7,080
0.8
0.013158
0.028571
node-utils
292
2024-02-14T01:57:52.998534
Apache-2.0
true
9d8f142d73622d77207844793ee6b48d
\n\n
.venv\Lib\site-packages\pandas\tests\extension\list\__pycache__\test_list.cpython-313.pyc
test_list.cpython-313.pyc
Other
1,342
0.8
0.076923
0
react-lib
802
2023-08-19T05:34:05.725309
BSD-3-Clause
true
05b87e29b25cc0c6e10e91c14b3a605e
\n\n
.venv\Lib\site-packages\pandas\tests\extension\list\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
343
0.7
0
0
awesome-app
106
2025-01-17T10:16:44.861445
MIT
true
39330961d0e24ffae6627181d5318217
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\conftest.cpython-313.pyc
conftest.cpython-313.pyc
Other
7,704
0.95
0.083916
0.015748
awesome-app
233
2025-06-28T14:23:08.156235
MIT
true
c8854d0e6a4182c624db0784207998d5
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_categorical.cpython-313.pyc
test_categorical.cpython-313.pyc
Other
10,699
0.95
0.074074
0.026667
vue-tools
108
2024-10-06T19:19:56.786552
Apache-2.0
true
15f2fb3217246abc1fec8d533e4834c1
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_common.cpython-313.pyc
test_common.cpython-313.pyc
Other
6,047
0.8
0
0
node-utils
733
2024-11-02T01:00:06.382802
MIT
true
8707e7b20916c636e841a29eabc4ccd2
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_datetime.cpython-313.pyc
test_datetime.cpython-313.pyc
Other
7,934
0.95
0.053763
0
node-utils
136
2025-01-14T18:30:49.464346
GPL-3.0
true
33d5e899113dcd0bab2691e1f1f7d284
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_extension.cpython-313.pyc
test_extension.cpython-313.pyc
Other
1,621
0.8
0.086957
0.136364
python-kit
655
2024-03-23T09:00:45.159564
MIT
true
62c0908b2ec1f9ff3d189a392f800cb9
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_interval.cpython-313.pyc
test_interval.cpython-313.pyc
Other
6,605
0.95
0.122449
0
vue-tools
412
2023-12-30T05:45:22.118479
MIT
true
f6acc29298449d7c947a4c465040c766
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_masked.cpython-313.pyc
test_masked.cpython-313.pyc
Other
18,990
0.95
0.052288
0
node-utils
308
2024-11-17T23:34:47.958654
Apache-2.0
true
da13c7eb04e5e023fecce4e65eb53900
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_numpy.cpython-313.pyc
test_numpy.cpython-313.pyc
Other
23,171
0.95
0.0625
0
node-utils
154
2025-07-03T09:57:42.655877
MIT
true
b1777f1685efe618a5df1ad05226f010
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_period.cpython-313.pyc
test_period.cpython-313.pyc
Other
6,226
0.95
0.113636
0
react-lib
489
2024-09-01T15:47:50.649261
GPL-3.0
true
2a777bb2aeaa2bb21ee97a4dd014ac66
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_sparse.cpython-313.pyc
test_sparse.cpython-313.pyc
Other
30,084
0.95
0.034314
0
python-kit
315
2024-04-29T17:48:43.299578
MIT
true
6bdaa15ef30084bbb06132ed87b506d2
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\test_string.cpython-313.pyc
test_string.cpython-313.pyc
Other
14,686
0.95
0.037037
0.013245
react-lib
568
2023-10-03T11:03:52.659722
GPL-3.0
true
a98b31bda3888dc2344ca54bbdce5d78
\n\n
.venv\Lib\site-packages\pandas\tests\extension\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
197
0.7
0
0
react-lib
372
2025-06-13T07:39:18.520218
GPL-3.0
true
8b13f9b918a1810c0d9aa74170036cef
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom pandas import (\n DataFrame,\n concat,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import AxisInt\n\n\ndef _check_mixed_float(df, dtype=None):\n # float16 are most likely to be upcasted to float32\n dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"}\n if isinstance(dtype, str):\n dtypes = {k: dtype for k, v in dtypes.items()}\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get("A"):\n assert df.dtypes["A"] == dtypes["A"]\n if dtypes.get("B"):\n assert df.dtypes["B"] == dtypes["B"]\n if dtypes.get("C"):\n assert df.dtypes["C"] == dtypes["C"]\n if dtypes.get("D"):\n assert df.dtypes["D"] == dtypes["D"]\n\n\ndef _check_mixed_int(df, dtype=None):\n dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"}\n if isinstance(dtype, str):\n dtypes = {k: dtype for k, v in dtypes.items()}\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get("A"):\n assert df.dtypes["A"] == dtypes["A"]\n if dtypes.get("B"):\n assert df.dtypes["B"] == dtypes["B"]\n if dtypes.get("C"):\n assert df.dtypes["C"] == dtypes["C"]\n if dtypes.get("D"):\n assert df.dtypes["D"] == dtypes["D"]\n\n\ndef zip_frames(frames: list[DataFrame], axis: AxisInt = 1) -> DataFrame:\n """\n take a list of frames, zip them together under the\n assumption that these all have the first frames' index/columns.\n\n Returns\n -------\n new_frame : DataFrame\n """\n if axis == 1:\n columns = frames[0].columns\n zipped = [f.loc[:, c] for c in columns for f in frames]\n return concat(zipped, axis=1)\n else:\n index = frames[0].index\n zipped = [f.loc[i, :] for i in index for f in frames]\n return DataFrame(zipped)\n
.venv\Lib\site-packages\pandas\tests\frame\common.py
common.py
Python
1,873
0.95
0.333333
0.018868
react-lib
131
2023-10-26T12:32:20.757180
MIT
true
0bc9ecf1a851f224bf4abb53fe0c20aa
import numpy as np
import pytest

from pandas import (
    DataFrame,
    Index,
    NaT,
    date_range,
)


@pytest.fixture
def datetime_frame() -> DataFrame:
    """
    Fixture for DataFrame of floats with DatetimeIndex

    Columns are ['A', 'B', 'C', 'D']
    """
    business_days = date_range("2000-01-01", periods=100, freq="B")
    values = np.random.default_rng(2).standard_normal((100, 4))
    return DataFrame(values, columns=Index(list("ABCD")), index=business_days)


@pytest.fixture
def float_string_frame():
    """
    Fixture for DataFrame of floats and strings with index of unique strings

    Columns are ['A', 'B', 'C', 'D', 'foo'].
    """
    labels = Index([f"foo_{i}" for i in range(30)], dtype=object)
    frame = DataFrame(
        np.random.default_rng(2).standard_normal((30, 4)),
        index=labels,
        columns=Index(list("ABCD")),
    )
    frame["foo"] = "bar"
    return frame


@pytest.fixture
def mixed_float_frame():
    """
    Fixture for DataFrame of different float types with index of unique strings

    Columns are ['A', 'B', 'C', 'D'].
    """
    data = {}
    for col, dtype in zip(list("ABCD"), ["float32", "float32", "float32", "float64"]):
        # a fresh seeded generator per column, matching the fixture's contract
        data[col] = np.random.default_rng(2).random(30, dtype=dtype)
    frame = DataFrame(
        data, index=Index([f"foo_{i}" for i in range(30)], dtype=object)
    )
    # not supported by numpy random
    frame["C"] = frame["C"].astype("float16")
    return frame


@pytest.fixture
def mixed_int_frame():
    """
    Fixture for DataFrame of different int types with index of unique strings

    Columns are ['A', 'B', 'C', 'D'].
    """
    data = {}
    for col, dtype in zip(list("ABCD"), ["int32", "uint64", "uint8", "int64"]):
        data[col] = np.ones(30, dtype=dtype)
    return DataFrame(data, index=Index([f"foo_{i}" for i in range(30)], dtype=object))


@pytest.fixture
def timezone_frame():
    """
    Fixture for DataFrame of date_range Series with different time zones

    Columns are ['A', 'B', 'C']; some entries are missing

           A                         B                         C
    0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00
    1 2013-01-02                       NaT                       NaT
    2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00
    """
    frame = DataFrame(
        {
            "A": date_range("20130101", periods=3),
            "B": date_range("20130101", periods=3, tz="US/Eastern"),
            "C": date_range("20130101", periods=3, tz="CET"),
        }
    )
    frame.iloc[1, 1] = NaT
    frame.iloc[1, 2] = NaT
    return frame
.venv\Lib\site-packages\pandas\tests\frame\conftest.py
conftest.py
Python
2,616
0.95
0.15
0.012048
vue-tools
219
2024-07-07T07:42:18.160439
Apache-2.0
true
25ae1ca52dfa6fbd4243fad712de9b15
from datetime import datetime

import pytz

from pandas import DataFrame
import pandas._testing as tm


class TestDataFrameAlterAxes:
    # Tests for setting index/columns attributes directly (i.e. __setattr__)

    def test_set_axis_setattr_index(self):
        # GH 6785
        # Assigning a column to .index and popping it should be equivalent
        # to calling set_index on that column.
        stamp = datetime(2014, 4, 1, tzinfo=pytz.utc)
        df = DataFrame([{"ts": stamp, "foo": 1}])
        expected = df.set_index("ts")

        df.index = df["ts"]
        df.pop("ts")

        tm.assert_frame_equal(df, expected)

    # Renaming

    def test_assign_columns(self, float_frame):
        float_frame["hi"] = "there"

        renamed = float_frame.copy()
        renamed.columns = ["foo", "bar", "baz", "quux", "foo2"]
        # Assigning .columns is positional: 'C' -> 'baz', 'hi' -> 'foo2'.
        tm.assert_series_equal(float_frame["C"], renamed["baz"], check_names=False)
        tm.assert_series_equal(float_frame["hi"], renamed["foo2"], check_names=False)
.venv\Lib\site-packages\pandas\tests\frame\test_alter_axes.py
test_alter_axes.py
Python
873
0.95
0.133333
0.190476
awesome-app
823
2025-01-25T19:45:14.492791
MIT
true
2e33c0b305783c383072bb38359600ae
from copy import deepcopy
import inspect
import pydoc

import numpy as np
import pytest

from pandas._config import using_string_dtype
from pandas._config.config import option_context

from pandas.compat import HAS_PYARROW

import pandas as pd
from pandas import (
    DataFrame,
    Series,
    date_range,
    timedelta_range,
)
import pandas._testing as tm


class TestDataFrameMisc:
    """Assorted DataFrame API behavior tests (dir/tab-completion, axes,
    emptiness, attrs/flags propagation, inplace conventions)."""

    def test_getitem_pop_assign_name(self, float_frame):
        # Column access via [], pop, and .loc should all preserve the name.
        s = float_frame["A"]
        assert s.name == "A"

        s = float_frame.pop("A")
        assert s.name == "A"

        s = float_frame.loc[:, "B"]
        assert s.name == "B"

        s2 = s.loc[:]
        assert s2.name == "B"

    def test_get_axis(self, float_frame):
        # Private axis-resolution helpers accept both int and string aliases.
        f = float_frame
        assert f._get_axis_number(0) == 0
        assert f._get_axis_number(1) == 1
        assert f._get_axis_number("index") == 0
        assert f._get_axis_number("rows") == 0
        assert f._get_axis_number("columns") == 1

        assert f._get_axis_name(0) == "index"
        assert f._get_axis_name(1) == "columns"
        assert f._get_axis_name("index") == "index"
        assert f._get_axis_name("rows") == "index"
        assert f._get_axis_name("columns") == "columns"

        assert f._get_axis(0) is f.index
        assert f._get_axis(1) is f.columns

        # Invalid axis specifiers raise ValueError with a descriptive message.
        with pytest.raises(ValueError, match="No axis named"):
            f._get_axis_number(2)

        with pytest.raises(ValueError, match="No axis.*foo"):
            f._get_axis_name("foo")

        with pytest.raises(ValueError, match="No axis.*None"):
            f._get_axis_name(None)

        with pytest.raises(ValueError, match="No axis named"):
            f._get_axis_number(None)

    def test_column_contains_raises(self, float_frame):
        # An Index object is unhashable, so `columns in frame` must raise.
        with pytest.raises(TypeError, match="unhashable type: 'Index'"):
            float_frame.columns in float_frame

    def test_tab_completion(self):
        # DataFrame whose columns are identifiers shall have them in __dir__.
        df = DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
        for key in list("ABCD"):
            assert key in dir(df)
        assert isinstance(df.__getitem__("A"), Series)

        # DataFrame whose first-level columns are identifiers shall have
        # them in __dir__.
        df = DataFrame(
            [list("abcd"), list("efgh")],
            columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
        )
        for key in list("ABCD"):
            assert key in dir(df)
        for key in list("EFGH"):
            assert key not in dir(df)
        assert isinstance(df.__getitem__("A"), DataFrame)

    def test_display_max_dir_items(self):
        # display.max_dir_items increases the number of columns that are in __dir__.
        columns = ["a" + str(i) for i in range(420)]
        values = [range(420), range(420)]
        df = DataFrame(values, columns=columns)

        # The default value for display.max_dir_items is 100
        assert "a99" in dir(df)
        assert "a100" not in dir(df)

        with option_context("display.max_dir_items", 300):
            df = DataFrame(values, columns=columns)
            assert "a299" in dir(df)
            assert "a300" not in dir(df)

        with option_context("display.max_dir_items", None):
            # None removes the cap entirely: every column shows up.
            df = DataFrame(values, columns=columns)
            assert "a419" in dir(df)

    def test_not_hashable(self):
        # DataFrames (empty or not) are mutable and therefore unhashable.
        empty_frame = DataFrame()

        df = DataFrame([1])
        msg = "unhashable type: 'DataFrame'"
        with pytest.raises(TypeError, match=msg):
            hash(df)
        with pytest.raises(TypeError, match=msg):
            hash(empty_frame)

    @pytest.mark.xfail(
        using_string_dtype() and HAS_PYARROW, reason="surrogates not allowed"
    )
    def test_column_name_contains_unicode_surrogate(self):
        # GH 25509
        colname = "\ud83d"
        df = DataFrame({colname: []})
        # this should not crash
        assert colname not in dir(df)
        assert df.columns[0] == colname

    def test_new_empty_index(self):
        # Setting an index name on one frame must not leak into another.
        df1 = DataFrame(np.random.default_rng(2).standard_normal((0, 3)))
        df2 = DataFrame(np.random.default_rng(2).standard_normal((0, 3)))
        df1.index.name = "foo"
        assert df2.index.name is None

    def test_get_agg_axis(self, float_frame):
        # _get_agg_axis returns the axis labels *other* than the one given.
        cols = float_frame._get_agg_axis(0)
        assert cols is float_frame.columns

        idx = float_frame._get_agg_axis(1)
        assert idx is float_frame.index

        msg = r"Axis must be 0 or 1 \(got 2\)"
        with pytest.raises(ValueError, match=msg):
            float_frame._get_agg_axis(2)

    def test_empty(self, float_frame, float_string_frame):
        empty_frame = DataFrame()
        assert empty_frame.empty

        assert not float_frame.empty
        assert not float_string_frame.empty

        # corner case
        df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
        del df["A"]
        assert not df.empty

    def test_len(self, float_frame):
        assert len(float_frame) == len(float_frame.index)

        # single block corner case
        arr = float_frame[["A", "B"]].values
        expected = float_frame.reindex(columns=["A", "B"]).values
        tm.assert_almost_equal(arr, expected)

    def test_axis_aliases(self, float_frame):
        # String axis aliases must agree with the numeric axis arguments.
        f = float_frame

        # reg name
        expected = f.sum(axis=0)
        result = f.sum(axis="index")
        tm.assert_series_equal(result, expected)

        expected = f.sum(axis=1)
        result = f.sum(axis="columns")
        tm.assert_series_equal(result, expected)

    def test_class_axis(self):
        # GH 18147
        # no exception and no empty docstring
        assert pydoc.getdoc(DataFrame.index)
        assert pydoc.getdoc(DataFrame.columns)

    def test_series_put_names(self, float_string_frame):
        # Each Series in the internal _series mapping is named after its key.
        series = float_string_frame._series
        for k, v in series.items():
            assert v.name == k

    def test_empty_nonzero(self):
        df = DataFrame([1, 2, 3])
        assert not df.empty
        df = DataFrame(index=[1], columns=[1])
        assert not df.empty
        df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
        assert df.empty
        assert df.T.empty

    @pytest.mark.parametrize(
        "df",
        [
            DataFrame(),
            DataFrame(index=[1]),
            DataFrame(columns=[1]),
            DataFrame({1: []}),
        ],
    )
    def test_empty_like(self, df):
        # All constructions above yield frames with no values -> empty.
        assert df.empty
        assert df.T.empty

    def test_with_datetimelikes(self):
        # Transposing mixed datetime64/timedelta64 columns upcasts to object.
        df = DataFrame(
            {
                "A": date_range("20130101", periods=10),
                "B": timedelta_range("1 day", periods=10),
            }
        )
        t = df.T

        result = t.dtypes.value_counts()
        expected = Series({np.dtype("object"): 10}, name="count")
        tm.assert_series_equal(result, expected)

    def test_deepcopy(self, float_frame):
        # A deep copy must not share data with the original.
        cp = deepcopy(float_frame)
        cp.loc[0, "A"] = 10
        assert not float_frame.equals(cp)

    def test_inplace_return_self(self):
        # GH 1893
        # Convention: inplace=True mutators return None, never self.

        data = DataFrame(
            {"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
        )

        def _check_f(base, f):
            # Apply mutator f to base and assert the None-return convention.
            result = f(base)
            assert result is None

        # -----DataFrame-----

        # set_index
        f = lambda x: x.set_index("a", inplace=True)
        _check_f(data.copy(), f)

        # reset_index
        f = lambda x: x.reset_index(inplace=True)
        _check_f(data.set_index("a"), f)

        # drop_duplicates
        f = lambda x: x.drop_duplicates(inplace=True)
        _check_f(data.copy(), f)

        # sort
        f = lambda x: x.sort_values("b", inplace=True)
        _check_f(data.copy(), f)

        # sort_index
        f = lambda x: x.sort_index(inplace=True)
        _check_f(data.copy(), f)

        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(data.copy(), f)

        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(data.copy(), f)

        # rename
        f = lambda x: x.rename({1: "foo"}, inplace=True)
        _check_f(data.copy(), f)

        # -----Series-----
        d = data.copy()["c"]

        # reset_index
        f = lambda x: x.reset_index(inplace=True, drop=True)
        _check_f(data.set_index("a")["c"], f)

        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(d.copy(), f)

        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(d.copy(), f)

        # rename
        f = lambda x: x.rename({1: "foo"}, inplace=True)
        _check_f(d.copy(), f)

    def test_tab_complete_warning(self, ip, frame_or_series):
        # GH 16409
        # IPython tab-completion on a frame/series should emit no warnings.
        pytest.importorskip("IPython", minversion="6.0.0")
        from IPython.core.completer import provisionalcompleter

        if frame_or_series is DataFrame:
            code = "from pandas import DataFrame; obj = DataFrame()"
        else:
            code = "from pandas import Series; obj = Series(dtype=object)"

        ip.run_cell(code)
        # GH 31324 newer jedi version raises Deprecation warning;
        # appears resolved 2021-02-02
        with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
            with provisionalcompleter("ignore"):
                list(ip.Completer.completions("obj.", 1))

    def test_attrs(self):
        # .attrs metadata propagates through rename.
        df = DataFrame({"A": [2, 3]})
        assert df.attrs == {}
        df.attrs["version"] = 1

        result = df.rename(columns=str)
        assert result.attrs == {"version": 1}

    def test_attrs_deepcopy(self):
        # Propagated attrs are deep-copied, not shared by reference.
        df = DataFrame({"A": [2, 3]})
        assert df.attrs == {}
        df.attrs["tags"] = {"spam", "ham"}

        result = df.rename(columns=str)
        assert result.attrs == df.attrs
        assert result.attrs["tags"] is not df.attrs["tags"]

    @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
    def test_set_flags(
        self,
        allows_duplicate_labels,
        frame_or_series,
        using_copy_on_write,
        warn_copy_on_write,
    ):
        obj = DataFrame({"A": [1, 2]})
        key = (0, 0)
        if frame_or_series is Series:
            obj = obj["A"]
            key = 0

        result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels)

        if allows_duplicate_labels is None:
            # We don't update when it's not provided
            assert result.flags.allows_duplicate_labels is True
        else:
            assert result.flags.allows_duplicate_labels is allows_duplicate_labels

        # We made a copy
        assert obj is not result

        # We didn't mutate obj
        assert obj.flags.allows_duplicate_labels is True

        # But we didn't copy data
        if frame_or_series is Series:
            assert np.may_share_memory(obj.values, result.values)
        else:
            assert np.may_share_memory(obj["A"].values, result["A"].values)

        with tm.assert_cow_warning(warn_copy_on_write):
            result.iloc[key] = 0
        if using_copy_on_write:
            assert obj.iloc[key] == 1
        else:
            assert obj.iloc[key] == 0
            # set back to 1 for test below
            with tm.assert_cow_warning(warn_copy_on_write):
                result.iloc[key] = 1

        # Now we do copy.
        result = obj.set_flags(
            copy=True, allows_duplicate_labels=allows_duplicate_labels
        )
        result.iloc[key] = 10
        assert obj.iloc[key] == 1

    def test_constructor_expanddim(self):
        # GH#33628 accessing _constructor_expanddim should not raise NotImplementedError
        # GH38782 pandas has no container higher than DataFrame (two-dim), so
        # DataFrame._constructor_expand_dim, doesn't make sense, so is removed.
        df = DataFrame()

        msg = "'DataFrame' object has no attribute '_constructor_expanddim'"
        with pytest.raises(AttributeError, match=msg):
            df._constructor_expanddim(np.arange(27).reshape(3, 3, 3))

    def test_inspect_getmembers(self):
        # GH38740
        # inspect.getmembers touches deprecated attributes; only the
        # expected deprecation warning may surface.
        df = DataFrame()
        msg = "DataFrame._data is deprecated"
        with tm.assert_produces_warning(
            DeprecationWarning, match=msg, check_stacklevel=False
        ):
            inspect.getmembers(df)
.venv\Lib\site-packages\pandas\tests\frame\test_api.py
test_api.py
Python
12,454
0.95
0.098734
0.126984
vue-tools
261
2023-12-07T19:50:40.384665
BSD-3-Clause
true
91fda6e8865123ad11fb28d530aef64b
from collections import deque\nfrom datetime import (\n datetime,\n timezone,\n)\nfrom enum import Enum\nimport functools\nimport operator\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import HAS_PYARROW\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.core.computation import expressions as expr\nfrom pandas.tests.frame.common import (\n _check_mixed_float,\n _check_mixed_int,\n)\n\n\n@pytest.fixture\ndef simple_frame():\n """\n Fixture for simple 3x3 DataFrame\n\n Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c'].\n\n one two three\n a 1.0 2.0 3.0\n b 4.0 5.0 6.0\n c 7.0 8.0 9.0\n """\n arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n\n return DataFrame(arr, columns=["one", "two", "three"], index=["a", "b", "c"])\n\n\n@pytest.fixture(autouse=True, params=[0, 100], ids=["numexpr", "python"])\ndef switch_numexpr_min_elements(request, monkeypatch):\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", request.param)\n yield request.param\n\n\nclass DummyElement:\n def __init__(self, value, dtype) -> None:\n self.value = value\n self.dtype = np.dtype(dtype)\n\n def __array__(self, dtype=None, copy=None):\n return np.array(self.value, dtype=self.dtype)\n\n def __str__(self) -> str:\n return f"DummyElement({self.value}, {self.dtype})"\n\n def __repr__(self) -> str:\n return str(self)\n\n def astype(self, dtype, copy=False):\n self.dtype = dtype\n return self\n\n def view(self, dtype):\n return type(self)(self.value.view(dtype), dtype)\n\n def any(self, axis=None):\n return bool(self.value)\n\n\n# -------------------------------------------------------------------\n# Comparisons\n\n\nclass TestFrameComparisons:\n # Specifically _not_ flex-comparisons\n\n def test_comparison_with_categorical_dtype(self):\n # GH#12564\n\n df = DataFrame({"A": ["foo", "bar", "baz"]})\n exp 
= DataFrame({"A": [True, False, False]})\n\n res = df == "foo"\n tm.assert_frame_equal(res, exp)\n\n # casting to categorical shouldn't affect the result\n df["A"] = df["A"].astype("category")\n\n res = df == "foo"\n tm.assert_frame_equal(res, exp)\n\n def test_frame_in_list(self):\n # GH#12689 this should raise at the DataFrame level, not blocks\n df = DataFrame(\n np.random.default_rng(2).standard_normal((6, 4)), columns=list("ABCD")\n )\n msg = "The truth value of a DataFrame is ambiguous"\n with pytest.raises(ValueError, match=msg):\n df in [None]\n\n @pytest.mark.parametrize(\n "arg, arg2",\n [\n [\n {\n "a": np.random.default_rng(2).integers(10, size=10),\n "b": pd.date_range("20010101", periods=10),\n },\n {\n "a": np.random.default_rng(2).integers(10, size=10),\n "b": np.random.default_rng(2).integers(10, size=10),\n },\n ],\n [\n {\n "a": np.random.default_rng(2).integers(10, size=10),\n "b": np.random.default_rng(2).integers(10, size=10),\n },\n {\n "a": np.random.default_rng(2).integers(10, size=10),\n "b": pd.date_range("20010101", periods=10),\n },\n ],\n [\n {\n "a": pd.date_range("20010101", periods=10),\n "b": pd.date_range("20010101", periods=10),\n },\n {\n "a": np.random.default_rng(2).integers(10, size=10),\n "b": np.random.default_rng(2).integers(10, size=10),\n },\n ],\n [\n {\n "a": np.random.default_rng(2).integers(10, size=10),\n "b": pd.date_range("20010101", periods=10),\n },\n {\n "a": pd.date_range("20010101", periods=10),\n "b": pd.date_range("20010101", periods=10),\n },\n ],\n ],\n )\n def test_comparison_invalid(self, arg, arg2):\n # GH4968\n # invalid date/int comparisons\n x = DataFrame(arg)\n y = DataFrame(arg2)\n # we expect the result to match Series comparisons for\n # == and !=, inequalities should raise\n result = x == y\n expected = DataFrame(\n {col: x[col] == y[col] for col in x.columns},\n index=x.index,\n columns=x.columns,\n )\n tm.assert_frame_equal(result, expected)\n\n result = x != y\n expected = DataFrame(\n {col: 
x[col] != y[col] for col in x.columns},\n index=x.index,\n columns=x.columns,\n )\n tm.assert_frame_equal(result, expected)\n\n msgs = [\n r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",\n "invalid type promotion",\n (\n # npdev 1.20.0\n r"The DTypes <class 'numpy.dtype\[.*\]'> and "\n r"<class 'numpy.dtype\[.*\]'> do not have a common DType."\n ),\n ]\n msg = "|".join(msgs)\n with pytest.raises(TypeError, match=msg):\n x >= y\n with pytest.raises(TypeError, match=msg):\n x > y\n with pytest.raises(TypeError, match=msg):\n x < y\n with pytest.raises(TypeError, match=msg):\n x <= y\n\n @pytest.mark.parametrize(\n "left, right",\n [\n ("gt", "lt"),\n ("lt", "gt"),\n ("ge", "le"),\n ("le", "ge"),\n ("eq", "eq"),\n ("ne", "ne"),\n ],\n )\n def test_timestamp_compare(self, left, right):\n # make sure we can compare Timestamps on the right AND left hand side\n # GH#4982\n df = DataFrame(\n {\n "dates1": pd.date_range("20010101", periods=10),\n "dates2": pd.date_range("20010102", periods=10),\n "intcol": np.random.default_rng(2).integers(1000000000, size=10),\n "floatcol": np.random.default_rng(2).standard_normal(10),\n "stringcol": [chr(100 + i) for i in range(10)],\n }\n )\n df.loc[np.random.default_rng(2).random(len(df)) > 0.5, "dates2"] = pd.NaT\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # no nats\n if left in ["eq", "ne"]:\n expected = left_f(df, pd.Timestamp("20010109"))\n result = right_f(pd.Timestamp("20010109"), df)\n tm.assert_frame_equal(result, expected)\n else:\n msg = (\n "'(<|>)=?' 
not supported between "\n "instances of 'numpy.ndarray' and 'Timestamp'"\n )\n with pytest.raises(TypeError, match=msg):\n left_f(df, pd.Timestamp("20010109"))\n with pytest.raises(TypeError, match=msg):\n right_f(pd.Timestamp("20010109"), df)\n # nats\n if left in ["eq", "ne"]:\n expected = left_f(df, pd.Timestamp("nat"))\n result = right_f(pd.Timestamp("nat"), df)\n tm.assert_frame_equal(result, expected)\n else:\n msg = (\n "'(<|>)=?' not supported between "\n "instances of 'numpy.ndarray' and 'NaTType'"\n )\n with pytest.raises(TypeError, match=msg):\n left_f(df, pd.Timestamp("nat"))\n with pytest.raises(TypeError, match=msg):\n right_f(pd.Timestamp("nat"), df)\n\n def test_mixed_comparison(self):\n # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,\n # not raise TypeError\n # (this appears to be fixed before GH#22163, not sure when)\n df = DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])\n other = DataFrame([["a", "b"], ["c", "d"]])\n\n result = df == other\n assert not result.any().any()\n\n result = df != other\n assert result.all().all()\n\n def test_df_boolean_comparison_error(self):\n # GH#4576, GH#22880\n # comparing DataFrame against list/tuple with len(obj) matching\n # len(df.columns) is supported as of GH#22800\n df = DataFrame(np.arange(6).reshape((3, 2)))\n\n expected = DataFrame([[False, False], [True, False], [False, False]])\n\n result = df == (2, 2)\n tm.assert_frame_equal(result, expected)\n\n result = df == [2, 2]\n tm.assert_frame_equal(result, expected)\n\n def test_df_float_none_comparison(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((8, 3)),\n index=range(8),\n columns=["A", "B", "C"],\n )\n\n result = df.__eq__(None)\n assert not result.any().any()\n\n def test_df_string_comparison(self):\n df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])\n mask_a = df.a > 1\n tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])\n tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])\n\n mask_b = df.b == "foo"\n 
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])\n tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])\n\n\nclass TestFrameFlexComparisons:\n # TODO: test_bool_flex_frame needs a better name\n @pytest.mark.parametrize("op", ["eq", "ne", "gt", "lt", "ge", "le"])\n def test_bool_flex_frame(self, op):\n data = np.random.default_rng(2).standard_normal((5, 3))\n other_data = np.random.default_rng(2).standard_normal((5, 3))\n df = DataFrame(data)\n other = DataFrame(other_data)\n ndim_5 = np.ones(df.shape + (1, 3))\n\n # DataFrame\n assert df.eq(df).values.all()\n assert not df.ne(df).values.any()\n f = getattr(df, op)\n o = getattr(operator, op)\n # No NAs\n tm.assert_frame_equal(f(other), o(df, other))\n # Unaligned\n part_o = other.loc[3:, 1:].copy()\n rs = f(part_o)\n xp = o(df, part_o.reindex(index=df.index, columns=df.columns))\n tm.assert_frame_equal(rs, xp)\n # ndarray\n tm.assert_frame_equal(f(other.values), o(df, other.values))\n # scalar\n tm.assert_frame_equal(f(0), o(df, 0))\n # NAs\n msg = "Unable to coerce to Series/DataFrame"\n tm.assert_frame_equal(f(np.nan), o(df, np.nan))\n with pytest.raises(ValueError, match=msg):\n f(ndim_5)\n\n @pytest.mark.parametrize("box", [np.array, Series])\n def test_bool_flex_series(self, box):\n # Series\n # list/tuple\n data = np.random.default_rng(2).standard_normal((5, 3))\n df = DataFrame(data)\n idx_ser = box(np.random.default_rng(2).standard_normal(5))\n col_ser = box(np.random.default_rng(2).standard_normal(3))\n\n idx_eq = df.eq(idx_ser, axis=0)\n col_eq = df.eq(col_ser)\n idx_ne = df.ne(idx_ser, axis=0)\n col_ne = df.ne(col_ser)\n tm.assert_frame_equal(col_eq, df == Series(col_ser))\n tm.assert_frame_equal(col_eq, -col_ne)\n tm.assert_frame_equal(idx_eq, -idx_ne)\n tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)\n tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))\n tm.assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))\n tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))\n\n idx_gt = 
df.gt(idx_ser, axis=0)\n col_gt = df.gt(col_ser)\n idx_le = df.le(idx_ser, axis=0)\n col_le = df.le(col_ser)\n\n tm.assert_frame_equal(col_gt, df > Series(col_ser))\n tm.assert_frame_equal(col_gt, -col_le)\n tm.assert_frame_equal(idx_gt, -idx_le)\n tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)\n\n idx_ge = df.ge(idx_ser, axis=0)\n col_ge = df.ge(col_ser)\n idx_lt = df.lt(idx_ser, axis=0)\n col_lt = df.lt(col_ser)\n tm.assert_frame_equal(col_ge, df >= Series(col_ser))\n tm.assert_frame_equal(col_ge, -col_lt)\n tm.assert_frame_equal(idx_ge, -idx_lt)\n tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)\n\n idx_ser = Series(np.random.default_rng(2).standard_normal(5))\n col_ser = Series(np.random.default_rng(2).standard_normal(3))\n\n def test_bool_flex_frame_na(self):\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n # NA\n df.loc[0, 0] = np.nan\n rs = df.eq(df)\n assert not rs.loc[0, 0]\n rs = df.ne(df)\n assert rs.loc[0, 0]\n rs = df.gt(df)\n assert not rs.loc[0, 0]\n rs = df.lt(df)\n assert not rs.loc[0, 0]\n rs = df.ge(df)\n assert not rs.loc[0, 0]\n rs = df.le(df)\n assert not rs.loc[0, 0]\n\n def test_bool_flex_frame_complex_dtype(self):\n # complex\n arr = np.array([np.nan, 1, 6, np.nan])\n arr2 = np.array([2j, np.nan, 7, None])\n df = DataFrame({"a": arr})\n df2 = DataFrame({"a": arr2})\n\n msg = "|".join(\n [\n "'>' not supported between instances of '.*' and 'complex'",\n r"unorderable types: .*complex\(\)", # PY35\n ]\n )\n with pytest.raises(TypeError, match=msg):\n # inequalities are not well-defined for complex numbers\n df.gt(df2)\n with pytest.raises(TypeError, match=msg):\n # regression test that we get the same behavior for Series\n df["a"].gt(df2["a"])\n with pytest.raises(TypeError, match=msg):\n # Check that we match numpy behavior here\n df.values > df2.values\n\n rs = df.ne(df2)\n assert rs.values.all()\n\n arr3 = np.array([2j, np.nan, None])\n df3 = DataFrame({"a": arr3})\n\n with pytest.raises(TypeError, match=msg):\n # 
inequalities are not well-defined for complex numbers\n df3.gt(2j)\n with pytest.raises(TypeError, match=msg):\n # regression test that we get the same behavior for Series\n df3["a"].gt(2j)\n with pytest.raises(TypeError, match=msg):\n # Check that we match numpy behavior here\n df3.values > 2j\n\n def test_bool_flex_frame_object_dtype(self):\n # corner, dtype=object\n df1 = DataFrame({"col": ["foo", np.nan, "bar"]}, dtype=object)\n df2 = DataFrame({"col": ["foo", datetime.now(), "bar"]}, dtype=object)\n result = df1.ne(df2)\n exp = DataFrame({"col": [False, True, False]})\n tm.assert_frame_equal(result, exp)\n\n def test_flex_comparison_nat(self):\n # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,\n # and _definitely_ not be NaN\n df = DataFrame([pd.NaT])\n\n result = df == pd.NaT\n # result.iloc[0, 0] is a np.bool_ object\n assert result.iloc[0, 0].item() is False\n\n result = df.eq(pd.NaT)\n assert result.iloc[0, 0].item() is False\n\n result = df != pd.NaT\n assert result.iloc[0, 0].item() is True\n\n result = df.ne(pd.NaT)\n assert result.iloc[0, 0].item() is True\n\n @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])\n def test_df_flex_cmp_constant_return_types(self, opname):\n # GH 15077, non-empty DataFrame\n df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})\n const = 2\n\n result = getattr(df, opname)(const).dtypes.value_counts()\n tm.assert_series_equal(\n result, Series([2], index=[np.dtype(bool)], name="count")\n )\n\n @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])\n def test_df_flex_cmp_constant_return_types_empty(self, opname):\n # GH 15077 empty DataFrame\n df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})\n const = 2\n\n empty = df.iloc[:0]\n result = getattr(empty, opname)(const).dtypes.value_counts()\n tm.assert_series_equal(\n result, Series([2], index=[np.dtype(bool)], name="count")\n )\n\n def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):\n ii = 
pd.IntervalIndex.from_breaks([1, 2, 3])\n df = DataFrame({"A": ii, "B": ii})\n\n ser = Series([0, 0])\n res = df.eq(ser, axis=0)\n\n expected = DataFrame({"A": [False, False], "B": [False, False]})\n tm.assert_frame_equal(res, expected)\n\n ser2 = Series([1, 2], index=["A", "B"])\n res2 = df.eq(ser2, axis=1)\n tm.assert_frame_equal(res2, expected)\n\n\n# -------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestFrameFlexArithmetic:\n def test_floordiv_axis0(self):\n # make sure we df.floordiv(ser, axis=0) matches column-wise result\n arr = np.arange(3)\n ser = Series(arr)\n df = DataFrame({"A": ser, "B": ser})\n\n result = df.floordiv(ser, axis=0)\n\n expected = DataFrame({col: df[col] // ser for col in df.columns})\n\n tm.assert_frame_equal(result, expected)\n\n result2 = df.floordiv(ser.values, axis=0)\n tm.assert_frame_equal(result2, expected)\n\n def test_df_add_td64_columnwise(self):\n # GH 22534 Check that column-wise addition broadcasts correctly\n dti = pd.date_range("2016-01-01", periods=10)\n tdi = pd.timedelta_range("1", periods=10)\n tser = Series(tdi)\n df = DataFrame({0: dti, 1: tdi})\n\n result = df.add(tser, axis=0)\n expected = DataFrame({0: dti + tdi, 1: tdi + tdi})\n tm.assert_frame_equal(result, expected)\n\n def test_df_add_flex_filled_mixed_dtypes(self):\n # GH 19611\n dti = pd.date_range("2016-01-01", periods=3)\n ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")\n df = DataFrame({"A": dti, "B": ser})\n other = DataFrame({"A": ser, "B": ser})\n fill = pd.Timedelta(days=1).to_timedelta64()\n result = df.add(other, fill_value=fill)\n\n expected = DataFrame(\n {\n "A": Series(\n ["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"\n ),\n "B": ser * 2,\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_arith_flex_frame(\n self, all_arithmetic_operators, float_frame, mixed_float_frame\n ):\n # one instance of parametrized fixture\n op = all_arithmetic_operators\n\n 
def f(x, y):\n # r-versions not in operator-stdlib; get op without "r" and invert\n if op.startswith("__r"):\n return getattr(operator, op.replace("__r", "__"))(y, x)\n return getattr(operator, op)(x, y)\n\n result = getattr(float_frame, op)(2 * float_frame)\n expected = f(float_frame, 2 * float_frame)\n tm.assert_frame_equal(result, expected)\n\n # vs mix float\n result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)\n expected = f(mixed_float_frame, 2 * mixed_float_frame)\n tm.assert_frame_equal(result, expected)\n _check_mixed_float(result, dtype={"C": None})\n\n @pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])\n def test_arith_flex_frame_mixed(\n self,\n op,\n int_frame,\n mixed_int_frame,\n mixed_float_frame,\n switch_numexpr_min_elements,\n ):\n f = getattr(operator, op)\n\n # vs mix int\n result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)\n expected = f(mixed_int_frame, 2 + mixed_int_frame)\n\n # no overflow in the uint\n dtype = None\n if op in ["__sub__"]:\n dtype = {"B": "uint64", "C": None}\n elif op in ["__add__", "__mul__"]:\n dtype = {"C": None}\n if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0:\n # when using numexpr, the casting rules are slightly different:\n # in the `2 + mixed_int_frame` operation, int32 column becomes\n # and int64 column (not preserving dtype in operation with Python\n # scalar), and then the int32/int64 combo results in int64 result\n dtype["A"] = (2 + mixed_int_frame)["A"].dtype\n tm.assert_frame_equal(result, expected)\n _check_mixed_int(result, dtype=dtype)\n\n # vs mix float\n result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)\n expected = f(mixed_float_frame, 2 * mixed_float_frame)\n tm.assert_frame_equal(result, expected)\n _check_mixed_float(result, dtype={"C": None})\n\n # vs plain int\n result = getattr(int_frame, op)(2 * int_frame)\n expected = f(int_frame, 2 * int_frame)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("dim", range(3, 6))\n 
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame, dim):
        """ndarray operands with dim >= 3 cannot be coerced and must raise ValueError."""
        # one instance of parametrized fixture
        op = all_arithmetic_operators

        # Check that arrays with dim >= 3 raise
        arr = np.ones((1,) * dim)
        msg = "Unable to coerce to Series/DataFrame"
        with pytest.raises(ValueError, match=msg):
            getattr(float_frame, op)(arr)

    def test_arith_flex_frame_corner(self, float_frame):
        """Corner cases: scalar add, zero-length alignment, fill_value with a Series."""
        const_add = float_frame.add(1)
        tm.assert_frame_equal(const_add, float_frame + 1)

        # corner cases: a zero-length operand aligns to an all-NaN result
        result = float_frame.add(float_frame[:0])
        expected = float_frame.sort_index() * np.nan
        tm.assert_frame_equal(result, expected)

        result = float_frame[:0].add(float_frame)
        expected = float_frame.sort_index() * np.nan
        tm.assert_frame_equal(result, expected)

        # fill_value together with a Series operand is not implemented
        with pytest.raises(NotImplementedError, match="fill_value"):
            float_frame.add(float_frame.iloc[0], fill_value=3)

        with pytest.raises(NotImplementedError, match="fill_value"):
            float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)

    @pytest.mark.parametrize("op", ["add", "sub", "mul", "mod"])
    def test_arith_flex_series_ops(self, simple_frame, op):
        """Flex methods match the equivalent operator for row and column operands."""
        # after arithmetic refactor, add truediv here
        df = simple_frame

        row = df.xs("a")
        col = df["two"]
        f = getattr(df, op)
        op = getattr(operator, op)
        tm.assert_frame_equal(f(row), op(df, row))
        # axis=0 broadcasts the column down the rows (equivalent to the op on df.T)
        tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)

    def test_arith_flex_series(self, simple_frame):
        """Flex division matches the `/` operator for row and column operands."""
        df = simple_frame

        row = df.xs("a")
        col = df["two"]
        # special case for some reason
        tm.assert_frame_equal(df.add(row, axis=None), df + row)

        # cases which will be refactored after big arithmetic refactor
        tm.assert_frame_equal(df.div(row), df / row)
        tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)

    @pytest.mark.parametrize("dtype", ["int64", "float64"])
    def test_arith_flex_series_broadcasting(self, dtype):
        # broadcasting issue in GH 7325
        df = DataFrame(np.arange(3 * 2).reshape((3, 2)), 
dtype=dtype)\n expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])\n result = df.div(df[0], axis="index")\n tm.assert_frame_equal(result, expected)\n\n def test_arith_flex_zero_len_raises(self):\n # GH 19522 passing fill_value to frame flex arith methods should\n # raise even in the zero-length special cases\n ser_len0 = Series([], dtype=object)\n df_len0 = DataFrame(columns=["A", "B"])\n df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])\n\n with pytest.raises(NotImplementedError, match="fill_value"):\n df.add(ser_len0, fill_value="E")\n\n with pytest.raises(NotImplementedError, match="fill_value"):\n df_len0.sub(df["A"], axis=None, fill_value=3)\n\n def test_flex_add_scalar_fill_value(self):\n # GH#12723\n dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")\n df = DataFrame({"foo": dat}, index=range(6))\n\n exp = df.fillna(0).add(2)\n res = df.add(2, fill_value=0)\n tm.assert_frame_equal(res, exp)\n\n def test_sub_alignment_with_duplicate_index(self):\n # GH#5185 dup aligning operations should work\n df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])\n df2 = DataFrame([1, 2, 3], index=[1, 2, 3])\n expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])\n result = df1.sub(df2)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("op", ["__add__", "__mul__", "__sub__", "__truediv__"])\n def test_arithmetic_with_duplicate_columns(self, op):\n # operations\n df = DataFrame({"A": np.arange(10), "B": np.random.default_rng(2).random(10)})\n expected = getattr(df, op)(df)\n expected.columns = ["A", "A"]\n df.columns = ["A", "A"]\n result = getattr(df, op)(df)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("level", [0, None])\n def test_broadcast_multiindex(self, level):\n # GH34388\n df1 = DataFrame({"A": [0, 1, 2], "B": [1, 2, 3]})\n df1.columns = df1.columns.set_names("L1")\n\n df2 = DataFrame({("A", "C"): [0, 0, 0], ("A", "D"): [0, 0, 0]})\n df2.columns = df2.columns.set_names(["L1", "L2"])\n\n 
result = df1.add(df2, level=level)\n expected = DataFrame({("A", "C"): [0, 1, 2], ("A", "D"): [0, 1, 2]})\n expected.columns = expected.columns.set_names(["L1", "L2"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_frame_multiindex_operations(self):\n # GH 43321\n df = DataFrame(\n {2010: [1, 2, 3], 2020: [3, 4, 5]},\n index=MultiIndex.from_product(\n [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]\n ),\n )\n\n series = Series(\n [0.4],\n index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]),\n )\n\n expected = DataFrame(\n {2010: [1.4, 2.4, 3.4], 2020: [3.4, 4.4, 5.4]},\n index=MultiIndex.from_product(\n [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]\n ),\n )\n result = df.add(series, axis=0)\n\n tm.assert_frame_equal(result, expected)\n\n def test_frame_multiindex_operations_series_index_to_frame_index(self):\n # GH 43321\n df = DataFrame(\n {2010: [1], 2020: [3]},\n index=MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]),\n )\n\n series = Series(\n [10.0, 20.0, 30.0],\n index=MultiIndex.from_product(\n [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]\n ),\n )\n\n expected = DataFrame(\n {2010: [11.0, 21, 31.0], 2020: [13.0, 23.0, 33.0]},\n index=MultiIndex.from_product(\n [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]\n ),\n )\n result = df.add(series, axis=0)\n\n tm.assert_frame_equal(result, expected)\n\n def test_frame_multiindex_operations_no_align(self):\n df = DataFrame(\n {2010: [1, 2, 3], 2020: [3, 4, 5]},\n index=MultiIndex.from_product(\n [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]\n ),\n )\n\n series = Series(\n [0.4],\n index=MultiIndex.from_product([["c"], ["a"]], names=["mod", "scen"]),\n )\n\n expected = DataFrame(\n {2010: np.nan, 2020: np.nan},\n index=MultiIndex.from_tuples(\n [\n ("a", "b", 0),\n ("a", "b", 1),\n ("a", "b", 2),\n ("a", "c", np.nan),\n ],\n names=["scen", "mod", "id"],\n ),\n )\n result = df.add(series, axis=0)\n\n tm.assert_frame_equal(result, 
expected)\n\n def test_frame_multiindex_operations_part_align(self):\n df = DataFrame(\n {2010: [1, 2, 3], 2020: [3, 4, 5]},\n index=MultiIndex.from_tuples(\n [\n ("a", "b", 0),\n ("a", "b", 1),\n ("a", "c", 2),\n ],\n names=["scen", "mod", "id"],\n ),\n )\n\n series = Series(\n [0.4],\n index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]),\n )\n\n expected = DataFrame(\n {2010: [1.4, 2.4, np.nan], 2020: [3.4, 4.4, np.nan]},\n index=MultiIndex.from_tuples(\n [\n ("a", "b", 0),\n ("a", "b", 1),\n ("a", "c", 2),\n ],\n names=["scen", "mod", "id"],\n ),\n )\n result = df.add(series, axis=0)\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestFrameArithmetic:\n def test_td64_op_nat_casting(self):\n # Make sure we don't accidentally treat timedelta64(NaT) as datetime64\n # when calling dispatch_to_series in DataFrame arithmetic\n ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]")\n df = DataFrame([[1, 2], [3, 4]])\n\n result = df * ser\n expected = DataFrame({0: ser, 1: ser})\n tm.assert_frame_equal(result, expected)\n\n def test_df_add_2d_array_rowlike_broadcasts(self):\n # GH#23000\n arr = np.arange(6).reshape(3, 2)\n df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])\n\n rowlike = arr[[1], :] # shape --> (1, ncols)\n assert rowlike.shape == (1, df.shape[1])\n\n expected = DataFrame(\n [[2, 4], [4, 6], [6, 8]],\n columns=df.columns,\n index=df.index,\n # specify dtype explicitly to avoid failing\n # on 32bit builds\n dtype=arr.dtype,\n )\n result = df + rowlike\n tm.assert_frame_equal(result, expected)\n result = rowlike + df\n tm.assert_frame_equal(result, expected)\n\n def test_df_add_2d_array_collike_broadcasts(self):\n # GH#23000\n arr = np.arange(6).reshape(3, 2)\n df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])\n\n collike = arr[:, [1]] # shape --> (nrows, 1)\n assert collike.shape == (df.shape[0], 1)\n\n expected = DataFrame(\n [[1, 2], [5, 6], [9, 10]],\n columns=df.columns,\n index=df.index,\n # 
specify dtype explicitly to avoid failing\n # on 32bit builds\n dtype=arr.dtype,\n )\n result = df + collike\n tm.assert_frame_equal(result, expected)\n result = collike + df\n tm.assert_frame_equal(result, expected)\n\n def test_df_arith_2d_array_rowlike_broadcasts(\n self, request, all_arithmetic_operators, using_array_manager\n ):\n # GH#23000\n opname = all_arithmetic_operators\n\n if using_array_manager and opname in ("__rmod__", "__rfloordiv__"):\n # TODO(ArrayManager) decide on dtypes\n td.mark_array_manager_not_yet_implemented(request)\n\n arr = np.arange(6).reshape(3, 2)\n df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])\n\n rowlike = arr[[1], :] # shape --> (1, ncols)\n assert rowlike.shape == (1, df.shape[1])\n\n exvals = [\n getattr(df.loc["A"], opname)(rowlike.squeeze()),\n getattr(df.loc["B"], opname)(rowlike.squeeze()),\n getattr(df.loc["C"], opname)(rowlike.squeeze()),\n ]\n\n expected = DataFrame(exvals, columns=df.columns, index=df.index)\n\n result = getattr(df, opname)(rowlike)\n tm.assert_frame_equal(result, expected)\n\n def test_df_arith_2d_array_collike_broadcasts(\n self, request, all_arithmetic_operators, using_array_manager\n ):\n # GH#23000\n opname = all_arithmetic_operators\n\n if using_array_manager and opname in ("__rmod__", "__rfloordiv__"):\n # TODO(ArrayManager) decide on dtypes\n td.mark_array_manager_not_yet_implemented(request)\n\n arr = np.arange(6).reshape(3, 2)\n df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])\n\n collike = arr[:, [1]] # shape --> (nrows, 1)\n assert collike.shape == (df.shape[0], 1)\n\n exvals = {\n True: getattr(df[True], opname)(collike.squeeze()),\n False: getattr(df[False], opname)(collike.squeeze()),\n }\n\n dtype = None\n if opname in ["__rmod__", "__rfloordiv__"]:\n # Series ops may return mixed int/float dtypes in cases where\n # DataFrame op will return all-float. 
So we upcast `expected`\n dtype = np.common_type(*(x.values for x in exvals.values()))\n\n expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)\n\n result = getattr(df, opname)(collike)\n tm.assert_frame_equal(result, expected)\n\n def test_df_bool_mul_int(self):\n # GH 22047, GH 22163 multiplication by 1 should result in int dtype,\n # not object dtype\n df = DataFrame([[False, True], [False, False]])\n result = df * 1\n\n # On appveyor this comes back as np.int32 instead of np.int64,\n # so we check dtype.kind instead of just dtype\n kinds = result.dtypes.apply(lambda x: x.kind)\n assert (kinds == "i").all()\n\n result = 1 * df\n kinds = result.dtypes.apply(lambda x: x.kind)\n assert (kinds == "i").all()\n\n def test_arith_mixed(self):\n left = DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})\n\n result = left + left\n expected = DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("col", ["A", "B"])\n def test_arith_getitem_commute(self, all_arithmetic_functions, col):\n df = DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})\n result = all_arithmetic_functions(df, 1)[col]\n expected = all_arithmetic_functions(df[col], 1)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]\n )\n def test_arith_alignment_non_pandas_object(self, values):\n # GH#17901\n df = DataFrame({"A": [1, 1], "B": [1, 1]})\n expected = DataFrame({"A": [2, 2], "B": [3, 3]})\n result = df + values\n tm.assert_frame_equal(result, expected)\n\n def test_arith_non_pandas_object(self):\n df = DataFrame(\n np.arange(1, 10, dtype="f8").reshape(3, 3),\n columns=["one", "two", "three"],\n index=["a", "b", "c"],\n )\n\n val1 = df.xs("a").values\n added = DataFrame(df.values + val1, index=df.index, columns=df.columns)\n tm.assert_frame_equal(df + val1, added)\n\n added = DataFrame((df.values.T + val1).T, 
index=df.index, columns=df.columns)\n tm.assert_frame_equal(df.add(val1, axis=0), added)\n\n val2 = list(df["two"])\n\n added = DataFrame(df.values + val2, index=df.index, columns=df.columns)\n tm.assert_frame_equal(df + val2, added)\n\n added = DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)\n tm.assert_frame_equal(df.add(val2, axis="index"), added)\n\n val3 = np.random.default_rng(2).random(df.shape)\n added = DataFrame(df.values + val3, index=df.index, columns=df.columns)\n tm.assert_frame_equal(df.add(val3), added)\n\n def test_operations_with_interval_categories_index(self, all_arithmetic_operators):\n # GH#27415\n op = all_arithmetic_operators\n ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))\n data = [1, 2]\n df = DataFrame([data], columns=ind)\n num = 10\n result = getattr(df, op)(num)\n expected = DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)\n tm.assert_frame_equal(result, expected)\n\n def test_frame_with_frame_reindex(self):\n # GH#31623\n df = DataFrame(\n {\n "foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],\n "bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],\n },\n columns=["foo", "bar"],\n dtype="M8[ns]",\n )\n df2 = df[["foo"]]\n\n result = df - df2\n\n expected = DataFrame(\n {"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},\n columns=["bar", "foo"],\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "value, dtype",\n [\n (1, "i8"),\n (1.0, "f8"),\n (2**63, "f8"),\n (1j, "complex128"),\n (2**63, "complex128"),\n (True, "bool"),\n (np.timedelta64(20, "ns"), "<m8[ns]"),\n (np.datetime64(20, "ns"), "<M8[ns]"),\n ],\n )\n @pytest.mark.parametrize(\n "op",\n [\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.mod,\n operator.pow,\n ],\n ids=lambda x: x.__name__,\n )\n def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):\n skip = {\n (operator.truediv, "bool"),\n (operator.pow, "bool"),\n 
(operator.add, "bool"),\n (operator.mul, "bool"),\n }\n\n elem = DummyElement(value, dtype)\n df = DataFrame({"A": [elem.value, elem.value]}, dtype=elem.dtype)\n\n invalid = {\n (operator.pow, "<M8[ns]"),\n (operator.mod, "<M8[ns]"),\n (operator.truediv, "<M8[ns]"),\n (operator.mul, "<M8[ns]"),\n (operator.add, "<M8[ns]"),\n (operator.pow, "<m8[ns]"),\n (operator.mul, "<m8[ns]"),\n (operator.sub, "bool"),\n (operator.mod, "complex128"),\n }\n\n if (op, dtype) in invalid:\n warn = None\n if (dtype == "<M8[ns]" and op == operator.add) or (\n dtype == "<m8[ns]" and op == operator.mul\n ):\n msg = None\n elif dtype == "complex128":\n msg = "ufunc 'remainder' not supported for the input types"\n elif op is operator.sub:\n msg = "numpy boolean subtract, the `-` operator, is "\n if (\n dtype == "bool"\n and expr.USE_NUMEXPR\n and switch_numexpr_min_elements == 0\n ):\n warn = UserWarning # "evaluating in Python space because ..."\n else:\n msg = (\n f"cannot perform __{op.__name__}__ with this "\n "index type: (DatetimeArray|TimedeltaArray)"\n )\n\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(warn):\n op(df, elem.value)\n\n elif (op, dtype) in skip:\n if op in [operator.add, operator.mul]:\n if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0:\n # "evaluating in Python space because ..."\n warn = UserWarning\n else:\n warn = None\n with tm.assert_produces_warning(warn):\n op(df, elem.value)\n\n else:\n msg = "operator '.*' not implemented for .* dtypes"\n with pytest.raises(NotImplementedError, match=msg):\n op(df, elem.value)\n\n else:\n with tm.assert_produces_warning(None):\n result = op(df, elem.value).dtypes\n expected = op(df, value).dtypes\n tm.assert_series_equal(result, expected)\n\n def test_arithmetic_midx_cols_different_dtypes(self):\n # GH#49769\n midx = MultiIndex.from_arrays([Series([1, 2]), Series([3, 4])])\n midx2 = MultiIndex.from_arrays([Series([1, 2], dtype="Int8"), Series([3, 4])])\n left = DataFrame([[1, 2], [3, 
4]], columns=midx)
        right = DataFrame([[1, 2], [3, 4]], columns=midx2)
        result = left - right
        expected = DataFrame([[0, 0], [0, 0]], columns=midx)
        tm.assert_frame_equal(result, expected)

    def test_arithmetic_midx_cols_different_dtypes_different_order(self):
        """Subtraction aligns MultiIndex columns even with differing dtype and order."""
        # GH#49769
        midx = MultiIndex.from_arrays([Series([1, 2]), Series([3, 4])])
        midx2 = MultiIndex.from_arrays([Series([2, 1], dtype="Int8"), Series([4, 3])])
        left = DataFrame([[1, 2], [3, 4]], columns=midx)
        right = DataFrame([[1, 2], [3, 4]], columns=midx2)
        result = left - right
        # columns align by value, so right's reversed order is re-matched to left's
        expected = DataFrame([[-1, 1], [-1, 1]], columns=midx)
        tm.assert_frame_equal(result, expected)


def test_frame_with_zero_len_series_corner_cases():
    """Arithmetic with a zero-length Series yields all-NaN; comparison must raise."""
    # GH#28600
    # easy all-float case
    df = DataFrame(
        np.random.default_rng(2).standard_normal(6).reshape(3, 2), columns=["A", "B"]
    )
    ser = Series(dtype=np.float64)

    result = df + ser
    expected = DataFrame(df.values * np.nan, columns=df.columns)
    tm.assert_frame_equal(result, expected)

    with pytest.raises(ValueError, match="not aligned"):
        # Automatic alignment for comparisons deprecated GH#36795, enforced 2.0
        df == ser

    # non-float case should not raise TypeError on comparison
    df2 = DataFrame(df.values.view("M8[ns]"), columns=df.columns)
    with pytest.raises(ValueError, match="not aligned"):
        # Automatic alignment for comparisons deprecated
        df2 == ser


def test_zero_len_frame_with_series_corner_cases():
    """Adding a Series to a zero-row frame returns the (empty) frame unchanged."""
    # GH#28600
    df = DataFrame(columns=["A", "B"], dtype=np.float64)
    ser = Series([1, 2], index=["A", "B"])

    result = df + ser
    expected = df
    tm.assert_frame_equal(result, expected)


def test_frame_single_columns_object_sum_axis_1():
    """Row-wise sum of a single object column keeps values; NaN rows sum to 0."""
    # GH 13758
    data = {
        "One": Series(["A", 1.2, np.nan]),
    }
    df = DataFrame(data)
    result = df.sum(axis=1)
    expected = Series(["A", 1.2, 0])
    tm.assert_series_equal(result, expected)


# -------------------------------------------------------------------
# Unsorted
# These arithmetic 
tests were previously in other files, eventually\n# should be parametrized and put into tests.arithmetic\n\n\nclass TestFrameArithmeticUnsorted:\n def test_frame_add_tz_mismatch_converts_to_utc(self):\n rng = pd.date_range("1/1/2011", periods=10, freq="h", tz="US/Eastern")\n df = DataFrame(\n np.random.default_rng(2).standard_normal(len(rng)), index=rng, columns=["a"]\n )\n\n df_moscow = df.tz_convert("Europe/Moscow")\n result = df + df_moscow\n assert result.index.tz is timezone.utc\n\n result = df_moscow + df\n assert result.index.tz is timezone.utc\n\n def test_align_frame(self):\n rng = pd.period_range("1/1/2000", "1/1/2010", freq="Y")\n ts = DataFrame(\n np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng\n )\n\n result = ts + ts[::2]\n expected = ts + ts\n expected.iloc[1::2] = np.nan\n tm.assert_frame_equal(result, expected)\n\n half = ts[::2]\n result = ts + half.take(np.random.default_rng(2).permutation(len(half)))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "op", [operator.add, operator.sub, operator.mul, operator.truediv]\n )\n def test_operators_none_as_na(self, op):\n df = DataFrame(\n {"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object\n )\n\n # since filling converts dtypes from object, changed expected to be\n # object\n msg = "Downcasting object dtype arrays"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n filled = df.fillna(np.nan)\n result = op(df, 3)\n expected = op(filled, 3).astype(object)\n expected[pd.isna(expected)] = np.nan\n tm.assert_frame_equal(result, expected)\n\n result = op(df, df)\n expected = op(filled, filled).astype(object)\n expected[pd.isna(expected)] = np.nan\n tm.assert_frame_equal(result, expected)\n\n msg = "Downcasting object dtype arrays"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = op(df, df.fillna(7))\n tm.assert_frame_equal(result, expected)\n\n msg = "Downcasting object dtype arrays"\n with 
tm.assert_produces_warning(FutureWarning, match=msg):\n result = op(df.fillna(7), df)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])\n # TODO: not sure what's correct here.\n @pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")\n def test_logical_typeerror_with_non_valid(self, op, res, float_frame):\n # we are comparing floats vs a string\n result = getattr(float_frame, op)("foo")\n assert bool(result.all().all()) is res\n\n @pytest.mark.parametrize("op", ["add", "sub", "mul", "div", "truediv"])\n def test_binary_ops_align(self, op):\n # test aligning binary ops\n\n # GH 6681\n index = MultiIndex.from_product(\n [list("abc"), ["one", "two", "three"], [1, 2, 3]],\n names=["first", "second", "third"],\n )\n\n df = DataFrame(\n np.arange(27 * 3).reshape(27, 3),\n index=index,\n columns=["value1", "value2", "value3"],\n ).sort_index()\n\n idx = pd.IndexSlice\n opa = getattr(operator, op, None)\n if opa is None:\n return\n\n x = Series([1.0, 10.0, 100.0], [1, 2, 3])\n result = getattr(df, op)(x, level="third", axis=0)\n\n expected = pd.concat(\n [opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]\n ).sort_index()\n tm.assert_frame_equal(result, expected)\n\n x = Series([1.0, 10.0], ["two", "three"])\n result = getattr(df, op)(x, level="second", axis=0)\n\n expected = (\n pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])\n .reindex_like(df)\n .sort_index()\n )\n tm.assert_frame_equal(result, expected)\n\n def test_binary_ops_align_series_dataframe(self):\n # GH9463 (alignment level of dataframe with series)\n\n midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])\n df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)\n s = Series({"a": 1, "b": 2})\n\n df2 = df.copy()\n df2.columns.names = ["lvl0", "lvl1"]\n s2 = s.copy()\n s2.index.name = "lvl1"\n\n # different cases of integer/string level names:\n res1 = df.mul(s, axis=1, level=1)\n res2 = df.mul(s2, 
axis=1, level=1)\n res3 = df2.mul(s, axis=1, level=1)\n res4 = df2.mul(s2, axis=1, level=1)\n res5 = df2.mul(s, axis=1, level="lvl1")\n res6 = df2.mul(s2, axis=1, level="lvl1")\n\n exp = DataFrame(\n np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx\n )\n\n for res in [res1, res2]:\n tm.assert_frame_equal(res, exp)\n\n exp.columns.names = ["lvl0", "lvl1"]\n for res in [res3, res4, res5, res6]:\n tm.assert_frame_equal(res, exp)\n\n def test_add_with_dti_mismatched_tzs(self):\n base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")\n idx1 = base.tz_convert("Asia/Tokyo")[:2]\n idx2 = base.tz_convert("US/Eastern")[1:]\n\n df1 = DataFrame({"A": [1, 2]}, index=idx1)\n df2 = DataFrame({"A": [1, 1]}, index=idx2)\n exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)\n tm.assert_frame_equal(df1 + df2, exp)\n\n def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):\n frame_copy = float_frame.reindex(float_frame.index[::2])\n\n del frame_copy["D"]\n # adding NAs to first 5 values of column "C"\n frame_copy.loc[: frame_copy.index[4], "C"] = np.nan\n\n added = float_frame + frame_copy\n\n indexer = added["A"].dropna().index\n exp = (float_frame["A"] * 2).copy()\n\n tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])\n\n exp.loc[~exp.index.isin(indexer)] = np.nan\n tm.assert_series_equal(added["A"], exp.loc[added["A"].index])\n\n assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()\n\n # assert(False)\n\n assert np.isnan(added["D"]).all()\n\n self_added = float_frame + float_frame\n tm.assert_index_equal(self_added.index, float_frame.index)\n\n added_rev = frame_copy + float_frame\n assert np.isnan(added["D"]).all()\n assert np.isnan(added_rev["D"]).all()\n\n # corner cases\n\n # empty\n plus_empty = float_frame + DataFrame()\n assert np.isnan(plus_empty.values).all()\n\n empty_plus = DataFrame() + float_frame\n assert np.isnan(empty_plus.values).all()\n\n empty_empty = DataFrame() + 
DataFrame()\n assert empty_empty.empty\n\n # out of order\n reverse = float_frame.reindex(columns=float_frame.columns[::-1])\n\n tm.assert_frame_equal(reverse + float_frame, float_frame * 2)\n\n # mix vs float64, upcast\n added = float_frame + mixed_float_frame\n _check_mixed_float(added, dtype="float64")\n added = mixed_float_frame + float_frame\n _check_mixed_float(added, dtype="float64")\n\n # mix vs mix\n added = mixed_float_frame + mixed_float_frame\n _check_mixed_float(added, dtype={"C": None})\n\n # with int\n added = float_frame + mixed_int_frame\n _check_mixed_float(added, dtype="float64")\n\n def test_combine_series(self, float_frame, mixed_float_frame, mixed_int_frame):\n # Series\n series = float_frame.xs(float_frame.index[0])\n\n added = float_frame + series\n\n for key, s in added.items():\n tm.assert_series_equal(s, float_frame[key] + series[key])\n\n larger_series = series.to_dict()\n larger_series["E"] = 1\n larger_series = Series(larger_series)\n larger_added = float_frame + larger_series\n\n for key, s in float_frame.items():\n tm.assert_series_equal(larger_added[key], s + series[key])\n assert "E" in larger_added\n assert np.isnan(larger_added["E"]).all()\n\n # no upcast needed\n added = mixed_float_frame + series\n assert np.all(added.dtypes == series.dtype)\n\n # vs mix (upcast) as needed\n added = mixed_float_frame + series.astype("float32")\n _check_mixed_float(added, dtype={"C": None})\n added = mixed_float_frame + series.astype("float16")\n _check_mixed_float(added, dtype={"C": None})\n\n # these used to raise with numexpr as we are adding an int64 to an\n # uint64....weird vs int\n added = mixed_int_frame + (100 * series).astype("int64")\n _check_mixed_int(\n added, dtype={"A": "int64", "B": "float64", "C": "int64", "D": "int64"}\n )\n added = mixed_int_frame + (100 * series).astype("int32")\n _check_mixed_int(\n added, dtype={"A": "int32", "B": "float64", "C": "int32", "D": "int64"}\n )\n\n def test_combine_timeseries(self, 
datetime_frame):\n # TimeSeries\n ts = datetime_frame["A"]\n\n # 10890\n # we no longer allow auto timeseries broadcasting\n # and require explicit broadcasting\n added = datetime_frame.add(ts, axis="index")\n\n for key, col in datetime_frame.items():\n result = col + ts\n tm.assert_series_equal(added[key], result, check_names=False)\n assert added[key].name == key\n if col.name == ts.name:\n assert result.name == "A"\n else:\n assert result.name is None\n\n smaller_frame = datetime_frame[:-5]\n smaller_added = smaller_frame.add(ts, axis="index")\n\n tm.assert_index_equal(smaller_added.index, datetime_frame.index)\n\n smaller_ts = ts[:-5]\n smaller_added2 = datetime_frame.add(smaller_ts, axis="index")\n tm.assert_frame_equal(smaller_added, smaller_added2)\n\n # length 0, result is all-nan\n result = datetime_frame.add(ts[:0], axis="index")\n expected = DataFrame(\n np.nan, index=datetime_frame.index, columns=datetime_frame.columns\n )\n tm.assert_frame_equal(result, expected)\n\n # Frame is all-nan\n result = datetime_frame[:0].add(ts, axis="index")\n expected = DataFrame(\n np.nan, index=datetime_frame.index, columns=datetime_frame.columns\n )\n tm.assert_frame_equal(result, expected)\n\n # empty but with non-empty index\n frame = datetime_frame[:1].reindex(columns=[])\n result = frame.mul(ts, axis="index")\n assert len(result) == len(ts)\n\n def test_combineFunc(self, float_frame, mixed_float_frame):\n result = float_frame * 2\n tm.assert_numpy_array_equal(result.values, float_frame.values * 2)\n\n # vs mix\n result = mixed_float_frame * 2\n for c, s in result.items():\n tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)\n _check_mixed_float(result, dtype={"C": None})\n\n result = DataFrame() * 2\n assert result.index.equals(DataFrame().index)\n assert len(result.columns) == 0\n\n @pytest.mark.parametrize(\n "func",\n [operator.eq, operator.ne, operator.lt, operator.gt, operator.ge, operator.le],\n )\n def test_comparisons(self, simple_frame, 
float_frame, func):\n df1 = DataFrame(\n np.random.default_rng(2).standard_normal((30, 4)),\n columns=Index(list("ABCD"), dtype=object),\n index=pd.date_range("2000-01-01", periods=30, freq="B"),\n )\n df2 = df1.copy()\n\n row = simple_frame.xs("a")\n ndim_5 = np.ones(df1.shape + (1, 1, 1))\n\n result = func(df1, df2)\n tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))\n\n msg = (\n "Unable to coerce to Series/DataFrame, "\n "dimension must be <= 2: (30, 4, 1, 1, 1)"\n )\n with pytest.raises(ValueError, match=re.escape(msg)):\n func(df1, ndim_5)\n\n result2 = func(simple_frame, row)\n tm.assert_numpy_array_equal(\n result2.values, func(simple_frame.values, row.values)\n )\n\n result3 = func(float_frame, 0)\n tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))\n\n msg = (\n r"Can only compare identically-labeled \(both index and columns\) "\n "DataFrame objects"\n )\n with pytest.raises(ValueError, match=msg):\n func(simple_frame, simple_frame[:2])\n\n def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):\n # GH 11565\n df = DataFrame(\n {x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}\n )\n\n f = getattr(operator, compare_operators_no_eq_ne)\n msg = "|".join(\n [\n "'[<>]=?' 
not supported between instances of 'str' and 'int'",\n "Invalid comparison between dtype=str and int",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n f(df, 0)\n\n def test_comparison_protected_from_errstate(self):\n missing_df = DataFrame(\n np.ones((10, 4), dtype=np.float64),\n columns=Index(list("ABCD"), dtype=object),\n )\n missing_df.loc[missing_df.index[0], "A"] = np.nan\n with np.errstate(invalid="ignore"):\n expected = missing_df.values < 0\n with np.errstate(invalid="raise"):\n result = (missing_df < 0).values\n tm.assert_numpy_array_equal(result, expected)\n\n def test_boolean_comparison(self):\n # GH 4576\n # boolean comparisons with a tuple/list give unexpected results\n df = DataFrame(np.arange(6).reshape((3, 2)))\n b = np.array([2, 2])\n b_r = np.atleast_2d([2, 2])\n b_c = b_r.T\n lst = [2, 2, 2]\n tup = tuple(lst)\n\n # gt\n expected = DataFrame([[False, False], [False, True], [True, True]])\n result = df > b\n tm.assert_frame_equal(result, expected)\n\n result = df.values > b\n tm.assert_numpy_array_equal(result, expected.values)\n\n msg1d = "Unable to coerce to Series, length must be 2: given 3"\n msg2d = "Unable to coerce to DataFrame, shape must be"\n msg2db = "operands could not be broadcast together with shapes"\n with pytest.raises(ValueError, match=msg1d):\n # wrong shape\n df > lst\n\n with pytest.raises(ValueError, match=msg1d):\n # wrong shape\n df > tup\n\n # broadcasts like ndarray (GH#23000)\n result = df > b_r\n tm.assert_frame_equal(result, expected)\n\n result = df.values > b_r\n tm.assert_numpy_array_equal(result, expected.values)\n\n with pytest.raises(ValueError, match=msg2d):\n df > b_c\n\n with pytest.raises(ValueError, match=msg2db):\n df.values > b_c\n\n # ==\n expected = DataFrame([[False, False], [True, False], [False, False]])\n result = df == b\n tm.assert_frame_equal(result, expected)\n\n with pytest.raises(ValueError, match=msg1d):\n df == lst\n\n with pytest.raises(ValueError, match=msg1d):\n df == tup\n\n # 
broadcasts like ndarray (GH#23000)\n result = df == b_r\n tm.assert_frame_equal(result, expected)\n\n result = df.values == b_r\n tm.assert_numpy_array_equal(result, expected.values)\n\n with pytest.raises(ValueError, match=msg2d):\n df == b_c\n\n assert df.values.shape != b_c.shape\n\n # with alignment\n df = DataFrame(\n np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")\n )\n expected.index = df.index\n expected.columns = df.columns\n\n with pytest.raises(ValueError, match=msg1d):\n df == lst\n\n with pytest.raises(ValueError, match=msg1d):\n df == tup\n\n def test_inplace_ops_alignment(self):\n # inplace ops / ops alignment\n # GH 8511\n\n columns = list("abcdefg")\n X_orig = DataFrame(\n np.arange(10 * len(columns)).reshape(-1, len(columns)),\n columns=columns,\n index=range(10),\n )\n Z = 100 * X_orig.iloc[:, 1:-1].copy()\n block1 = list("bedcf")\n subs = list("bcdef")\n\n # add\n X = X_orig.copy()\n result1 = (X[block1] + Z).reindex(columns=subs)\n\n X[block1] += Z\n result2 = X.reindex(columns=subs)\n\n X = X_orig.copy()\n result3 = (X[block1] + Z[block1]).reindex(columns=subs)\n\n X[block1] += Z[block1]\n result4 = X.reindex(columns=subs)\n\n tm.assert_frame_equal(result1, result2)\n tm.assert_frame_equal(result1, result3)\n tm.assert_frame_equal(result1, result4)\n\n # sub\n X = X_orig.copy()\n result1 = (X[block1] - Z).reindex(columns=subs)\n\n X[block1] -= Z\n result2 = X.reindex(columns=subs)\n\n X = X_orig.copy()\n result3 = (X[block1] - Z[block1]).reindex(columns=subs)\n\n X[block1] -= Z[block1]\n result4 = X.reindex(columns=subs)\n\n tm.assert_frame_equal(result1, result2)\n tm.assert_frame_equal(result1, result3)\n tm.assert_frame_equal(result1, result4)\n\n def test_inplace_ops_identity(self):\n # GH 5104\n # make sure that we are actually changing the object\n s_orig = Series([1, 2, 3])\n df_orig = DataFrame(\n np.random.default_rng(2).integers(0, 5, size=10).reshape(-1, 5)\n )\n\n # no dtype change\n s = s_orig.copy()\n s2 = s\n 
s += 1\n tm.assert_series_equal(s, s2)\n tm.assert_series_equal(s_orig + 1, s)\n assert s is s2\n assert s._mgr is s2._mgr\n\n df = df_orig.copy()\n df2 = df\n df += 1\n tm.assert_frame_equal(df, df2)\n tm.assert_frame_equal(df_orig + 1, df)\n assert df is df2\n assert df._mgr is df2._mgr\n\n # dtype change\n s = s_orig.copy()\n s2 = s\n s += 1.5\n tm.assert_series_equal(s, s2)\n tm.assert_series_equal(s_orig + 1.5, s)\n\n df = df_orig.copy()\n df2 = df\n df += 1.5\n tm.assert_frame_equal(df, df2)\n tm.assert_frame_equal(df_orig + 1.5, df)\n assert df is df2\n assert df._mgr is df2._mgr\n\n # mixed dtype\n arr = np.random.default_rng(2).integers(0, 10, size=5)\n df_orig = DataFrame({"A": arr.copy(), "B": "foo"})\n df = df_orig.copy()\n df2 = df\n df["A"] += 1\n expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})\n tm.assert_frame_equal(df, expected)\n tm.assert_frame_equal(df2, expected)\n assert df._mgr is df2._mgr\n\n df = df_orig.copy()\n df2 = df\n df["A"] += 1.5\n expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})\n tm.assert_frame_equal(df, expected)\n tm.assert_frame_equal(df2, expected)\n assert df._mgr is df2._mgr\n\n @pytest.mark.parametrize(\n "op",\n [\n "add",\n "and",\n pytest.param(\n "div",\n marks=pytest.mark.xfail(\n raises=AttributeError, reason="__idiv__ not implemented"\n ),\n ),\n "floordiv",\n "mod",\n "mul",\n "or",\n "pow",\n "sub",\n "truediv",\n "xor",\n ],\n )\n def test_inplace_ops_identity2(self, op):\n df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})\n\n operand = 2\n if op in ("and", "or", "xor"):\n # cannot use floats for boolean ops\n df["a"] = [True, False, True]\n\n df_copy = df.copy()\n iop = f"__i{op}__"\n op = f"__{op}__"\n\n # no id change and value is correct\n getattr(df, iop)(operand)\n expected = getattr(df_copy, op)(operand)\n tm.assert_frame_equal(df, expected)\n expected = id(df)\n assert id(df) == expected\n\n @pytest.mark.parametrize(\n "val",\n [\n [1, 2, 3],\n (1, 2, 3),\n np.array([1, 2, 3], 
dtype=np.int64),\n range(1, 4),\n ],\n )\n def test_alignment_non_pandas(self, val):\n index = ["A", "B", "C"]\n columns = ["X", "Y", "Z"]\n df = DataFrame(\n np.random.default_rng(2).standard_normal((3, 3)),\n index=index,\n columns=columns,\n )\n\n align = DataFrame._align_for_op\n\n expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)\n tm.assert_frame_equal(align(df, val, axis=0)[1], expected)\n\n expected = DataFrame(\n {"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index\n )\n tm.assert_frame_equal(align(df, val, axis=1)[1], expected)\n\n @pytest.mark.parametrize("val", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)])\n def test_alignment_non_pandas_length_mismatch(self, val):\n index = ["A", "B", "C"]\n columns = ["X", "Y", "Z"]\n df = DataFrame(\n np.random.default_rng(2).standard_normal((3, 3)),\n index=index,\n columns=columns,\n )\n\n align = DataFrame._align_for_op\n # length mismatch\n msg = "Unable to coerce to Series, length must be 3: given 2"\n with pytest.raises(ValueError, match=msg):\n align(df, val, axis=0)\n\n with pytest.raises(ValueError, match=msg):\n align(df, val, axis=1)\n\n def test_alignment_non_pandas_index_columns(self):\n index = ["A", "B", "C"]\n columns = ["X", "Y", "Z"]\n df = DataFrame(\n np.random.default_rng(2).standard_normal((3, 3)),\n index=index,\n columns=columns,\n )\n\n align = DataFrame._align_for_op\n val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n tm.assert_frame_equal(\n align(df, val, axis=0)[1],\n DataFrame(val, index=df.index, columns=df.columns),\n )\n tm.assert_frame_equal(\n align(df, val, axis=1)[1],\n DataFrame(val, index=df.index, columns=df.columns),\n )\n\n # shape mismatch\n msg = "Unable to coerce to DataFrame, shape must be"\n val = np.array([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(ValueError, match=msg):\n align(df, val, axis=0)\n\n with pytest.raises(ValueError, match=msg):\n align(df, val, axis=1)\n\n val = np.zeros((3, 3, 3))\n msg = re.escape(\n "Unable to 
coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"\n )\n with pytest.raises(ValueError, match=msg):\n align(df, val, axis=0)\n with pytest.raises(ValueError, match=msg):\n align(df, val, axis=1)\n\n def test_no_warning(self, all_arithmetic_operators):\n df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})\n b = df["B"]\n with tm.assert_produces_warning(None):\n getattr(df, all_arithmetic_operators)(b)\n\n def test_dunder_methods_binary(self, all_arithmetic_operators):\n # GH#??? frame.__foo__ should only accept one argument\n df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})\n b = df["B"]\n with pytest.raises(TypeError, match="takes 2 positional arguments"):\n getattr(df, all_arithmetic_operators)(b, 0)\n\n def test_align_int_fill_bug(self):\n # GH#910\n X = np.arange(10 * 10, dtype="float64").reshape(10, 10)\n Y = np.ones((10, 1), dtype=int)\n\n df1 = DataFrame(X)\n df1["0.X"] = Y.squeeze()\n\n df2 = df1.astype(float)\n\n result = df1 - df1.mean()\n expected = df2 - df2.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_pow_with_realignment():\n # GH#32685 pow has special semantics for operating with null values\n left = DataFrame({"A": [0, 1, 2]})\n right = DataFrame(index=[0, 1, 2])\n\n result = left**right\n expected = DataFrame({"A": [np.nan, 1.0, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dataframe_series_extension_dtypes():\n # https://github.com/pandas-dev/pandas/issues/34311\n df = DataFrame(\n np.random.default_rng(2).integers(0, 100, (10, 3)), columns=["a", "b", "c"]\n )\n ser = Series([1, 2, 3], index=["a", "b", "c"])\n\n expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)\n expected = DataFrame(expected, columns=df.columns, dtype="Int64")\n\n df_ea = df.astype("Int64")\n result = df_ea + ser\n tm.assert_frame_equal(result, expected)\n result = df_ea + ser.astype("Int64")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dataframe_blockwise_slicelike():\n # GH#34367\n arr = 
np.random.default_rng(2).integers(0, 1000, (100, 10))\n df1 = DataFrame(arr)\n # Explicit cast to float to avoid implicit cast when setting nan\n df2 = df1.copy().astype({1: "float", 3: "float", 7: "float"})\n df2.iloc[0, [1, 3, 7]] = np.nan\n\n # Explicit cast to float to avoid implicit cast when setting nan\n df3 = df1.copy().astype({5: "float"})\n df3.iloc[0, [5]] = np.nan\n\n # Explicit cast to float to avoid implicit cast when setting nan\n df4 = df1.copy().astype({2: "float", 3: "float", 4: "float"})\n df4.iloc[0, np.arange(2, 5)] = np.nan\n # Explicit cast to float to avoid implicit cast when setting nan\n df5 = df1.copy().astype({4: "float", 5: "float", 6: "float"})\n df5.iloc[0, np.arange(4, 7)] = np.nan\n\n for left, right in [(df1, df2), (df2, df3), (df4, df5)]:\n res = left + right\n\n expected = DataFrame({i: left[i] + right[i] for i in left.columns})\n tm.assert_frame_equal(res, expected)\n\n\n@pytest.mark.parametrize(\n "df, col_dtype",\n [\n (DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"),\n (\n DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")).astype(\n {"b": object}\n ),\n "object",\n ),\n ],\n)\ndef test_dataframe_operation_with_non_numeric_types(df, col_dtype):\n # GH #22663\n expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab"))\n expected = expected.astype({"b": col_dtype})\n result = df + Series([-1.0], index=list("a"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_arith_reindex_with_duplicates():\n # https://github.com/pandas-dev/pandas/issues/35194\n df1 = DataFrame(data=[[0]], columns=["second"])\n df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"])\n result = df1 + df2\n expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]])\ndef test_arith_list_of_arraylike_raise(to_add):\n # GH 36702. 
Raise when trying to add list of array-like to DataFrame\n df = DataFrame({"x": [1, 2], "y": [1, 2]})\n\n msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame"\n with pytest.raises(ValueError, match=msg):\n df + to_add\n with pytest.raises(ValueError, match=msg):\n to_add + df\n\n\ndef test_inplace_arithmetic_series_update(using_copy_on_write, warn_copy_on_write):\n # https://github.com/pandas-dev/pandas/issues/36373\n df = DataFrame({"A": [1, 2, 3]})\n df_orig = df.copy()\n series = df["A"]\n vals = series._values\n\n with tm.assert_cow_warning(warn_copy_on_write):\n series += 1\n if using_copy_on_write:\n assert series._values is not vals\n tm.assert_frame_equal(df, df_orig)\n else:\n assert series._values is vals\n\n expected = DataFrame({"A": [2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n\ndef test_arithmetic_multiindex_align():\n """\n Regression test for: https://github.com/pandas-dev/pandas/issues/33765\n """\n df1 = DataFrame(\n [[1]],\n index=["a"],\n columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]),\n )\n df2 = DataFrame([[1]], index=["a"], columns=Index([0], name="a"))\n expected = DataFrame(\n [[0]],\n index=["a"],\n columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]),\n )\n result = df1 - df2\n tm.assert_frame_equal(result, expected)\n\n\ndef test_bool_frame_mult_float():\n # GH 18549\n df = DataFrame(True, list("ab"), list("cd"))\n result = df * 1.0\n expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_frame_sub_nullable_int(any_int_ea_dtype):\n # GH 32822\n series1 = Series([1, 2, None], dtype=any_int_ea_dtype)\n series2 = Series([1, 2, 3], dtype=any_int_ea_dtype)\n expected = DataFrame([0, 0, None], dtype=any_int_ea_dtype)\n result = series1.to_frame() - series2.to_frame()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"\n)\ndef 
test_frame_op_subclass_nonclass_constructor():\n # GH#43201 subclass._constructor is a function, not the subclass itself\n\n class SubclassedSeries(Series):\n @property\n def _constructor(self):\n return SubclassedSeries\n\n @property\n def _constructor_expanddim(self):\n return SubclassedDataFrame\n\n class SubclassedDataFrame(DataFrame):\n _metadata = ["my_extra_data"]\n\n def __init__(self, my_extra_data, *args, **kwargs) -> None:\n self.my_extra_data = my_extra_data\n super().__init__(*args, **kwargs)\n\n @property\n def _constructor(self):\n return functools.partial(type(self), self.my_extra_data)\n\n @property\n def _constructor_sliced(self):\n return SubclassedSeries\n\n sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]})\n result = sdf * 2\n expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]})\n tm.assert_frame_equal(result, expected)\n\n result = sdf + sdf\n tm.assert_frame_equal(result, expected)\n\n\ndef test_enum_column_equality():\n Cols = Enum("Cols", "col1 col2")\n\n q1 = DataFrame({Cols.col1: [1, 2, 3]})\n q2 = DataFrame({Cols.col1: [1, 2, 3]})\n\n result = q1[Cols.col1] == q2[Cols.col1]\n expected = Series([True, True, True], name=Cols.col1)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_mixed_col_index_dtype(using_infer_string):\n # GH 47382\n df1 = DataFrame(columns=list("abc"), data=1.0, index=[0])\n df2 = DataFrame(columns=list("abc"), data=0.0, index=[0])\n df1.columns = df2.columns.astype("string")\n result = df1 + df2\n expected = DataFrame(columns=list("abc"), data=1.0, index=[0])\n if using_infer_string:\n # df2.columns.dtype will be "str" instead of object,\n # so the aligned result will be "string", not object\n if HAS_PYARROW:\n dtype = "string[pyarrow]"\n else:\n dtype = "string"\n expected.columns = expected.columns.astype(dtype)\n tm.assert_frame_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_arithmetic.py
test_arithmetic.py
Python
73,489
0.75
0.077855
0.102841
awesome-app
717
2024-11-27T04:14:03.671453
MIT
true
ea24ed29f475e46b3218b804decff5e9
import ctypes\n\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\n\npa = pytest.importorskip("pyarrow")\n\n\n@td.skip_if_no("pyarrow", min_version="14.0")\ndef test_dataframe_arrow_interface(using_infer_string):\n df = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})\n\n capsule = df.__arrow_c_stream__()\n assert (\n ctypes.pythonapi.PyCapsule_IsValid(\n ctypes.py_object(capsule), b"arrow_array_stream"\n )\n == 1\n )\n\n table = pa.table(df)\n string_type = pa.large_string() if using_infer_string else pa.string()\n expected = pa.table({"a": [1, 2, 3], "b": pa.array(["a", "b", "c"], string_type)})\n assert table.equals(expected)\n\n schema = pa.schema([("a", pa.int8()), ("b", pa.string())])\n table = pa.table(df, schema=schema)\n expected = expected.cast(schema)\n assert table.equals(expected)\n\n\n@td.skip_if_no("pyarrow", min_version="15.0")\ndef test_dataframe_to_arrow(using_infer_string):\n df = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})\n\n table = pa.RecordBatchReader.from_stream(df).read_all()\n string_type = pa.large_string() if using_infer_string else pa.string()\n expected = pa.table({"a": [1, 2, 3], "b": pa.array(["a", "b", "c"], string_type)})\n assert table.equals(expected)\n\n schema = pa.schema([("a", pa.int8()), ("b", pa.string())])\n table = pa.RecordBatchReader.from_stream(df, schema=schema).read_all()\n expected = expected.cast(schema)\n assert table.equals(expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_arrow_interface.py
test_arrow_interface.py
Python
1,505
0.85
0.085106
0
awesome-app
779
2023-09-12T05:23:12.679859
BSD-3-Clause
true
d9e308769ae67719ebca740c642f86aa
from datetime import (\n datetime,\n timedelta,\n)\nimport itertools\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import PerformanceWarning\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Series,\n Timestamp,\n date_range,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.core.internals.blocks import NumpyBlock\n\n# Segregated collection of methods that require the BlockManager internal data\n# structure\n\n\n# TODO(ArrayManager) check which of those tests need to be rewritten to test the\n# equivalent for ArrayManager\npytestmark = td.skip_array_manager_invalid_test\n\n\nclass TestDataFrameBlockInternals:\n def test_setitem_invalidates_datetime_index_freq(self):\n # GH#24096 altering a datetime64tz column inplace invalidates the\n # `freq` attribute on the underlying DatetimeIndex\n\n dti = date_range("20130101", periods=3, tz="US/Eastern")\n ts = dti[1]\n\n df = DataFrame({"B": dti})\n assert df["B"]._values.freq is None\n\n df.iloc[1, 0] = pd.NaT\n assert df["B"]._values.freq is None\n\n # check that the DatetimeIndex was not altered in place\n assert dti.freq == "D"\n assert dti[1] == ts\n\n def test_cast_internals(self, float_frame):\n msg = "Passing a BlockManager to DataFrame"\n with tm.assert_produces_warning(\n DeprecationWarning, match=msg, check_stacklevel=False\n ):\n casted = DataFrame(float_frame._mgr, dtype=int)\n expected = DataFrame(float_frame._series, dtype=int)\n tm.assert_frame_equal(casted, expected)\n\n with tm.assert_produces_warning(\n DeprecationWarning, match=msg, check_stacklevel=False\n ):\n casted = DataFrame(float_frame._mgr, dtype=np.int32)\n expected = DataFrame(float_frame._series, dtype=np.int32)\n tm.assert_frame_equal(casted, expected)\n\n def test_consolidate(self, float_frame):\n float_frame["E"] = 7.0\n consolidated = float_frame._consolidate()\n assert len(consolidated._mgr.blocks) == 1\n\n # Ensure copy, do I want this?\n 
recons = consolidated._consolidate()\n assert recons is not consolidated\n tm.assert_frame_equal(recons, consolidated)\n\n float_frame["F"] = 8.0\n assert len(float_frame._mgr.blocks) == 3\n\n return_value = float_frame._consolidate_inplace()\n assert return_value is None\n assert len(float_frame._mgr.blocks) == 1\n\n def test_consolidate_inplace(self, float_frame):\n # triggers in-place consolidation\n for letter in range(ord("A"), ord("Z")):\n float_frame[chr(letter)] = chr(letter)\n\n def test_modify_values(self, float_frame, using_copy_on_write):\n if using_copy_on_write:\n with pytest.raises(ValueError, match="read-only"):\n float_frame.values[5] = 5\n assert (float_frame.values[5] != 5).all()\n return\n\n float_frame.values[5] = 5\n assert (float_frame.values[5] == 5).all()\n\n # unconsolidated\n float_frame["E"] = 7.0\n col = float_frame["E"]\n float_frame.values[6] = 6\n # as of 2.0 .values does not consolidate, so subsequent calls to .values\n # does not share data\n assert not (float_frame.values[6] == 6).all()\n\n assert (col == 7).all()\n\n def test_boolean_set_uncons(self, float_frame):\n float_frame["E"] = 7.0\n\n expected = float_frame.values.copy()\n expected[expected > 1] = 2\n\n float_frame[float_frame > 1] = 2\n tm.assert_almost_equal(expected, float_frame.values)\n\n def test_constructor_with_convert(self):\n # this is actually mostly a test of lib.maybe_convert_objects\n # #2845\n df = DataFrame({"A": [2**63 - 1]})\n result = df["A"]\n expected = Series(np.asarray([2**63 - 1], np.int64), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [2**63]})\n result = df["A"]\n expected = Series(np.asarray([2**63], np.uint64), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [datetime(2005, 1, 1), True]})\n result = df["A"]\n expected = Series(\n np.asarray([datetime(2005, 1, 1), True], np.object_), name="A"\n )\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [None, 1]})\n result 
= df["A"]\n expected = Series(np.asarray([np.nan, 1], np.float64), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [1.0, 2]})\n result = df["A"]\n expected = Series(np.asarray([1.0, 2], np.float64), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [1.0 + 2.0j, 3]})\n result = df["A"]\n expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex128), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [1.0 + 2.0j, 3.0]})\n result = df["A"]\n expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex128), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [1.0 + 2.0j, True]})\n result = df["A"]\n expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [1.0, None]})\n result = df["A"]\n expected = Series(np.asarray([1.0, np.nan], np.float64), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [1.0 + 2.0j, None]})\n result = df["A"]\n expected = Series(np.asarray([1.0 + 2.0j, np.nan], np.complex128), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [2.0, 1, True, None]})\n result = df["A"]\n expected = Series(np.asarray([2.0, 1, True, None], np.object_), name="A")\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"A": [2.0, 1, datetime(2006, 1, 1), None]})\n result = df["A"]\n expected = Series(\n np.asarray([2.0, 1, datetime(2006, 1, 1), None], np.object_), name="A"\n )\n tm.assert_series_equal(result, expected)\n\n def test_construction_with_mixed(self, float_string_frame, using_infer_string):\n # mixed-type frames\n float_string_frame["datetime"] = datetime.now()\n float_string_frame["timedelta"] = timedelta(days=1, seconds=1)\n assert float_string_frame["datetime"].dtype == "M8[us]"\n assert float_string_frame["timedelta"].dtype == "m8[us]"\n result = float_string_frame.dtypes\n expected = Series(\n 
[np.dtype("float64")] * 4\n + [\n np.dtype("object")\n if not using_infer_string\n else pd.StringDtype(na_value=np.nan),\n np.dtype("datetime64[us]"),\n np.dtype("timedelta64[us]"),\n ],\n index=list("ABCD") + ["foo", "datetime", "timedelta"],\n )\n tm.assert_series_equal(result, expected)\n\n def test_construction_with_conversions(self):\n # convert from a numpy array of non-ns timedelta64; as of 2.0 this does\n # *not* convert\n arr = np.array([1, 2, 3], dtype="timedelta64[s]")\n df = DataFrame({"A": arr})\n expected = DataFrame(\n {"A": pd.timedelta_range("00:00:01", periods=3, freq="s")}, index=range(3)\n )\n tm.assert_numpy_array_equal(df["A"].to_numpy(), arr)\n\n expected = DataFrame(\n {\n "dt1": Timestamp("20130101"),\n "dt2": date_range("20130101", periods=3).astype("M8[s]"),\n # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),\n # FIXME: don't leave commented-out\n },\n index=range(3),\n )\n assert expected.dtypes["dt1"] == "M8[s]"\n assert expected.dtypes["dt2"] == "M8[s]"\n\n dt1 = np.datetime64("2013-01-01")\n dt2 = np.array(\n ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"\n )\n df = DataFrame({"dt1": dt1, "dt2": dt2})\n\n # df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01\n # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')\n # FIXME: don't leave commented-out\n\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_compound_dtypes(self):\n # GH 5191\n # compound dtypes should raise not-implementederror\n\n def f(dtype):\n data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9))\n return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype)\n\n msg = "compound dtypes are not implemented in the DataFrame constructor"\n with pytest.raises(NotImplementedError, match=msg):\n f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])\n\n # pre-2.0 these used to work (though results may be unexpected)\n with pytest.raises(TypeError, match="argument must be"):\n f("int64")\n with 
pytest.raises(TypeError, match="argument must be"):\n f("float64")\n\n # 10822\n msg = "^Unknown datetime string format, unable to parse: aa, at position 0$"\n with pytest.raises(ValueError, match=msg):\n f("M8[ns]")\n\n def test_pickle(self, float_string_frame, timezone_frame):\n empty_frame = DataFrame()\n\n unpickled = tm.round_trip_pickle(float_string_frame)\n tm.assert_frame_equal(float_string_frame, unpickled)\n\n # buglet\n float_string_frame._mgr.ndim\n\n # empty\n unpickled = tm.round_trip_pickle(empty_frame)\n repr(unpickled)\n\n # tz frame\n unpickled = tm.round_trip_pickle(timezone_frame)\n tm.assert_frame_equal(timezone_frame, unpickled)\n\n def test_consolidate_datetime64(self):\n # numpy vstack bug\n\n df = DataFrame(\n {\n "starting": pd.to_datetime(\n [\n "2012-06-21 00:00",\n "2012-06-23 07:00",\n "2012-06-23 16:30",\n "2012-06-25 08:00",\n "2012-06-26 12:00",\n ]\n ),\n "ending": pd.to_datetime(\n [\n "2012-06-23 07:00",\n "2012-06-23 16:30",\n "2012-06-25 08:00",\n "2012-06-26 12:00",\n "2012-06-27 08:00",\n ]\n ),\n "measure": [77, 65, 77, 0, 77],\n }\n )\n\n ser_starting = df.starting\n ser_starting.index = ser_starting.values\n ser_starting = ser_starting.tz_localize("US/Eastern")\n ser_starting = ser_starting.tz_convert("UTC")\n ser_starting.index.name = "starting"\n\n ser_ending = df.ending\n ser_ending.index = ser_ending.values\n ser_ending = ser_ending.tz_localize("US/Eastern")\n ser_ending = ser_ending.tz_convert("UTC")\n ser_ending.index.name = "ending"\n\n df.starting = ser_starting.index\n df.ending = ser_ending.index\n\n tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)\n tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)\n\n def test_is_mixed_type(self, float_frame, float_string_frame):\n assert not float_frame._is_mixed_type\n assert float_string_frame._is_mixed_type\n\n def test_stale_cached_series_bug_473(self, using_copy_on_write, warn_copy_on_write):\n # this is chained, but ok\n with 
option_context("chained_assignment", None):\n Y = DataFrame(\n np.random.default_rng(2).random((4, 4)),\n index=("a", "b", "c", "d"),\n columns=("e", "f", "g", "h"),\n )\n repr(Y)\n Y["e"] = Y["e"].astype("object")\n with tm.raises_chained_assignment_error():\n Y["g"]["c"] = np.nan\n repr(Y)\n Y.sum()\n Y["g"].sum()\n if using_copy_on_write:\n assert not pd.isna(Y["g"]["c"])\n else:\n assert pd.isna(Y["g"]["c"])\n\n @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")\n def test_strange_column_corruption_issue(self, using_copy_on_write):\n # TODO(wesm): Unclear how exactly this is related to internal matters\n df = DataFrame(index=[0, 1])\n df[0] = np.nan\n wasCol = {}\n\n with tm.assert_produces_warning(\n PerformanceWarning, raise_on_extra_warnings=False\n ):\n for i, dt in enumerate(df.index):\n for col in range(100, 200):\n if col not in wasCol:\n wasCol[col] = 1\n df[col] = np.nan\n if using_copy_on_write:\n df.loc[dt, col] = i\n else:\n df[col][dt] = i\n\n myid = 100\n\n first = len(df.loc[pd.isna(df[myid]), [myid]])\n second = len(df.loc[pd.isna(df[myid]), [myid]])\n assert first == second == 0\n\n def test_constructor_no_pandas_array(self):\n # Ensure that NumpyExtensionArray isn't allowed inside Series\n # See https://github.com/pandas-dev/pandas/issues/23995 for more.\n arr = Series([1, 2, 3]).array\n result = DataFrame({"A": arr})\n expected = DataFrame({"A": [1, 2, 3]})\n tm.assert_frame_equal(result, expected)\n assert isinstance(result._mgr.blocks[0], NumpyBlock)\n assert result._mgr.blocks[0].is_numeric\n\n def test_add_column_with_pandas_array(self):\n # GH 26390\n df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]})\n df["c"] = pd.arrays.NumpyExtensionArray(np.array([1, 2, None, 3], dtype=object))\n df2 = DataFrame(\n {\n "a": [1, 2, 3, 4],\n "b": ["a", "b", "c", "d"],\n "c": pd.arrays.NumpyExtensionArray(\n np.array([1, 2, None, 3], dtype=object)\n ),\n }\n )\n assert type(df["c"]._mgr.blocks[0]) == NumpyBlock\n 
assert df["c"]._mgr.blocks[0].is_object\n assert type(df2["c"]._mgr.blocks[0]) == NumpyBlock\n assert df2["c"]._mgr.blocks[0].is_object\n tm.assert_frame_equal(df, df2)\n\n\ndef test_update_inplace_sets_valid_block_values(using_copy_on_write):\n # https://github.com/pandas-dev/pandas/issues/33457\n df = DataFrame({"a": Series([1, 2, None], dtype="category")})\n\n # inplace update of a single column\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].fillna(1, inplace=True)\n else:\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n df["a"].fillna(1, inplace=True)\n\n # check we haven't put a Series into any block.values\n assert isinstance(df._mgr.blocks[0].values, Categorical)\n\n if not using_copy_on_write:\n # smoketest for OP bug from GH#35731\n assert df.isnull().sum().sum() == 0\n\n\ndef test_nonconsolidated_item_cache_take():\n # https://github.com/pandas-dev/pandas/issues/35521\n\n # create non-consolidated dataframe with object dtype columns\n df = DataFrame(\n {\n "col1": Series(["a"], dtype=object),\n }\n )\n df["col2"] = Series([0], dtype=object)\n assert not df._mgr.is_consolidated()\n\n # access column (item cache)\n df["col1"] == "A"\n # take operation\n # (regression was that this consolidated but didn't reset item cache,\n # resulting in an invalid cache and the .at operation not working properly)\n df[df["col2"] == 0]\n\n # now setting value should update actual dataframe\n df.at[0, "col1"] = "A"\n\n expected = DataFrame({"col1": ["A"], "col2": [0]}, dtype=object)\n tm.assert_frame_equal(df, expected)\n assert df.at[0, "col1"] == "A"\n
.venv\Lib\site-packages\pandas\tests\frame\test_block_internals.py
test_block_internals.py
Python
16,104
0.95
0.075724
0.124324
react-lib
965
2025-02-26T11:18:31.692657
GPL-3.0
true
08e84d45b23cb16d939a4b18d8cdc783
"""\nTests for DataFrame cumulative operations\n\nSee also\n--------\ntests.series.test_cumulative\n"""\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameCumulativeOps:\n # ---------------------------------------------------------------------\n # Cumulative Operations - cumsum, cummax, ...\n\n def test_cumulative_ops_smoke(self):\n # it works\n df = DataFrame({"A": np.arange(20)}, index=np.arange(20))\n df.cummax()\n df.cummin()\n df.cumsum()\n\n dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5))\n # TODO(wesm): do something with this?\n dm.cumsum()\n\n def test_cumprod_smoke(self, datetime_frame):\n datetime_frame.iloc[5:10, 0] = np.nan\n datetime_frame.iloc[10:15, 1] = np.nan\n datetime_frame.iloc[15:, 2] = np.nan\n\n # ints\n df = datetime_frame.fillna(0).astype(int)\n df.cumprod(0)\n df.cumprod(1)\n\n # ints32\n df = datetime_frame.fillna(0).astype(np.int32)\n df.cumprod(0)\n df.cumprod(1)\n\n @pytest.mark.parametrize("method", ["cumsum", "cumprod", "cummin", "cummax"])\n def test_cumulative_ops_match_series_apply(self, datetime_frame, method):\n datetime_frame.iloc[5:10, 0] = np.nan\n datetime_frame.iloc[10:15, 1] = np.nan\n datetime_frame.iloc[15:, 2] = np.nan\n\n # axis = 0\n result = getattr(datetime_frame, method)()\n expected = datetime_frame.apply(getattr(Series, method))\n tm.assert_frame_equal(result, expected)\n\n # axis = 1\n result = getattr(datetime_frame, method)(axis=1)\n expected = datetime_frame.apply(getattr(Series, method), axis=1)\n tm.assert_frame_equal(result, expected)\n\n # fix issue TODO: GH ref?\n assert np.shape(result) == np.shape(datetime_frame)\n\n def test_cumsum_preserve_dtypes(self):\n # GH#19296 dont incorrectly upcast to object\n df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]})\n\n result = df.cumsum()\n\n expected = DataFrame(\n {\n "A": Series([1, 3, 6], dtype=np.int64),\n "B": 
Series([1, 3, 6], dtype=np.float64),\n "C": df["C"].cumsum(),\n }\n )\n tm.assert_frame_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_cumulative.py
test_cumulative.py
Python
2,389
0.95
0.074074
0.15625
node-utils
512
2023-09-10T07:11:09.793484
BSD-3-Clause
true
0c3b80e1bea9deaf42266351a25f30f7
import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n IS64,\n is_platform_windows,\n)\n\nfrom pandas import (\n Categorical,\n DataFrame,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestIteration:\n def test_keys(self, float_frame):\n assert float_frame.keys() is float_frame.columns\n\n def test_iteritems(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])\n for k, v in df.items():\n assert isinstance(v, DataFrame._constructor_sliced)\n\n def test_items(self):\n # GH#17213, GH#13918\n cols = ["a", "b", "c"]\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)\n for c, (k, v) in zip(cols, df.items()):\n assert c == k\n assert isinstance(v, Series)\n assert (df[k] == v).all()\n\n def test_items_names(self, float_string_frame):\n for k, v in float_string_frame.items():\n assert v.name == k\n\n def test_iter(self, float_frame):\n assert list(float_frame) == list(float_frame.columns)\n\n def test_iterrows(self, float_frame, float_string_frame):\n for k, v in float_frame.iterrows():\n exp = float_frame.loc[k]\n tm.assert_series_equal(v, exp)\n\n for k, v in float_string_frame.iterrows():\n exp = float_string_frame.loc[k]\n tm.assert_series_equal(v, exp)\n\n def test_iterrows_iso8601(self):\n # GH#19671\n s = DataFrame(\n {\n "non_iso8601": ["M1701", "M1802", "M1903", "M2004"],\n "iso8601": date_range("2000-01-01", periods=4, freq="ME"),\n }\n )\n for k, v in s.iterrows():\n exp = s.loc[k]\n tm.assert_series_equal(v, exp)\n\n def test_iterrows_corner(self):\n # GH#12222\n df = DataFrame(\n {\n "a": [datetime.datetime(2015, 1, 1)],\n "b": [None],\n "c": [None],\n "d": [""],\n "e": [[]],\n "f": [set()],\n "g": [{}],\n }\n )\n expected = Series(\n [datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],\n index=list("abcdefg"),\n name=0,\n dtype="object",\n )\n _, result = next(df.iterrows())\n tm.assert_series_equal(result, expected)\n\n def test_itertuples(self, float_frame):\n for i, tup 
in enumerate(float_frame.itertuples()):\n ser = DataFrame._constructor_sliced(tup[1:])\n ser.name = tup[0]\n expected = float_frame.iloc[i, :].reset_index(drop=True)\n tm.assert_series_equal(ser, expected)\n\n def test_itertuples_index_false(self):\n df = DataFrame(\n {"floats": np.random.default_rng(2).standard_normal(5), "ints": range(5)},\n columns=["floats", "ints"],\n )\n\n for tup in df.itertuples(index=False):\n assert isinstance(tup[1], int)\n\n def test_itertuples_duplicate_cols(self):\n df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})\n dfaa = df[["a", "a"]]\n\n assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]\n\n # repr with int on 32-bit/windows\n if not (is_platform_windows() or not IS64):\n assert (\n repr(list(df.itertuples(name=None)))\n == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"\n )\n\n def test_itertuples_tuple_name(self):\n df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})\n tup = next(df.itertuples(name="TestName"))\n assert tup._fields == ("Index", "a", "b")\n assert (tup.Index, tup.a, tup.b) == tup\n assert type(tup).__name__ == "TestName"\n\n def test_itertuples_disallowed_col_labels(self):\n df = DataFrame(data={"def": [1, 2, 3], "return": [4, 5, 6]})\n tup2 = next(df.itertuples(name="TestName"))\n assert tup2 == (0, 1, 4)\n assert tup2._fields == ("Index", "_1", "_2")\n\n @pytest.mark.parametrize("limit", [254, 255, 1024])\n @pytest.mark.parametrize("index", [True, False])\n def test_itertuples_py2_3_field_limit_namedtuple(self, limit, index):\n # GH#28282\n df = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(limit)}])\n result = next(df.itertuples(index=index))\n assert isinstance(result, tuple)\n assert hasattr(result, "_fields")\n\n def test_sequence_like_with_categorical(self):\n # GH#7839\n # make sure can iterate\n df = DataFrame(\n {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}\n )\n df["grade"] = Categorical(df["raw_grade"])\n\n # basic sequencing testing\n result = 
list(df.grade.values)\n expected = np.array(df.grade.values).tolist()\n tm.assert_almost_equal(result, expected)\n\n # iteration\n for t in df.itertuples(index=False):\n str(t)\n\n for row, s in df.iterrows():\n str(s)\n\n for c, col in df.items():\n str(col)\n
.venv\Lib\site-packages\pandas\tests\frame\test_iteration.py
test_iteration.py
Python
5,077
0.95
0.1875
0.067669
react-lib
309
2024-01-05T14:23:37.718156
BSD-3-Clause
true
388d8d59196167ee3d88497e9a49e749
import operator\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n CategoricalIndex,\n DataFrame,\n Interval,\n Series,\n isnull,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameLogicalOperators:\n # &, |, ^\n\n @pytest.mark.parametrize(\n "left, right, op, expected",\n [\n (\n [True, False, np.nan],\n [True, False, True],\n operator.and_,\n [True, False, False],\n ),\n (\n [True, False, True],\n [True, False, np.nan],\n operator.and_,\n [True, False, False],\n ),\n (\n [True, False, np.nan],\n [True, False, True],\n operator.or_,\n [True, False, False],\n ),\n (\n [True, False, True],\n [True, False, np.nan],\n operator.or_,\n [True, False, True],\n ),\n ],\n )\n def test_logical_operators_nans(self, left, right, op, expected, frame_or_series):\n # GH#13896\n result = op(frame_or_series(left), frame_or_series(right))\n expected = frame_or_series(expected)\n\n tm.assert_equal(result, expected)\n\n def test_logical_ops_empty_frame(self):\n # GH#5808\n # empty frames, non-mixed dtype\n df = DataFrame(index=[1])\n\n result = df & df\n tm.assert_frame_equal(result, df)\n\n result = df | df\n tm.assert_frame_equal(result, df)\n\n df2 = DataFrame(index=[1, 2])\n result = df & df2\n tm.assert_frame_equal(result, df2)\n\n dfa = DataFrame(index=[1], columns=["A"])\n\n result = dfa & dfa\n expected = DataFrame(False, index=[1], columns=["A"])\n tm.assert_frame_equal(result, expected)\n\n def test_logical_ops_bool_frame(self):\n # GH#5808\n df1a_bool = DataFrame(True, index=[1], columns=["A"])\n\n result = df1a_bool & df1a_bool\n tm.assert_frame_equal(result, df1a_bool)\n\n result = df1a_bool | df1a_bool\n tm.assert_frame_equal(result, df1a_bool)\n\n def test_logical_ops_int_frame(self):\n # GH#5808\n df1a_int = DataFrame(1, index=[1], columns=["A"])\n df1a_bool = DataFrame(True, index=[1], columns=["A"])\n\n result = df1a_int | df1a_bool\n tm.assert_frame_equal(result, df1a_bool)\n\n # Check that this matches Series behavior\n res_ser = 
df1a_int["A"] | df1a_bool["A"]\n tm.assert_series_equal(res_ser, df1a_bool["A"])\n\n def test_logical_ops_invalid(self, using_infer_string):\n # GH#5808\n\n df1 = DataFrame(1.0, index=[1], columns=["A"])\n df2 = DataFrame(True, index=[1], columns=["A"])\n msg = re.escape("unsupported operand type(s) for |: 'float' and 'bool'")\n with pytest.raises(TypeError, match=msg):\n df1 | df2\n\n df1 = DataFrame("foo", index=[1], columns=["A"])\n df2 = DataFrame(True, index=[1], columns=["A"])\n if using_infer_string and df1["A"].dtype.storage == "pyarrow":\n msg = "operation 'or_' not supported for dtype 'str'"\n else:\n msg = re.escape("unsupported operand type(s) for |: 'str' and 'bool'")\n with pytest.raises(TypeError, match=msg):\n df1 | df2\n\n def test_logical_operators(self):\n def _check_bin_op(op):\n result = op(df1, df2)\n expected = DataFrame(\n op(df1.values, df2.values), index=df1.index, columns=df1.columns\n )\n assert result.values.dtype == np.bool_\n tm.assert_frame_equal(result, expected)\n\n def _check_unary_op(op):\n result = op(df1)\n expected = DataFrame(op(df1.values), index=df1.index, columns=df1.columns)\n assert result.values.dtype == np.bool_\n tm.assert_frame_equal(result, expected)\n\n df1 = {\n "a": {"a": True, "b": False, "c": False, "d": True, "e": True},\n "b": {"a": False, "b": True, "c": False, "d": False, "e": False},\n "c": {"a": False, "b": False, "c": True, "d": False, "e": False},\n "d": {"a": True, "b": False, "c": False, "d": True, "e": True},\n "e": {"a": True, "b": False, "c": False, "d": True, "e": True},\n }\n\n df2 = {\n "a": {"a": True, "b": False, "c": True, "d": False, "e": False},\n "b": {"a": False, "b": True, "c": False, "d": False, "e": False},\n "c": {"a": True, "b": False, "c": True, "d": False, "e": False},\n "d": {"a": False, "b": False, "c": False, "d": True, "e": False},\n "e": {"a": False, "b": False, "c": False, "d": False, "e": True},\n }\n\n df1 = DataFrame(df1)\n df2 = DataFrame(df2)\n\n 
_check_bin_op(operator.and_)\n _check_bin_op(operator.or_)\n _check_bin_op(operator.xor)\n\n _check_unary_op(operator.inv) # TODO: belongs elsewhere\n\n @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n def test_logical_with_nas(self):\n d = DataFrame({"a": [np.nan, False], "b": [True, True]})\n\n # GH4947\n # bool comparisons should return bool\n result = d["a"] | d["b"]\n expected = Series([False, True])\n tm.assert_series_equal(result, expected)\n\n # GH4604, automatic casting here\n result = d["a"].fillna(False) | d["b"]\n expected = Series([True, True])\n tm.assert_series_equal(result, expected)\n\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = d["a"].fillna(False, downcast=False) | d["b"]\n expected = Series([True, True])\n tm.assert_series_equal(result, expected)\n\n def test_logical_ops_categorical_columns(self):\n # GH#38367\n intervals = [Interval(1, 2), Interval(3, 4)]\n data = DataFrame(\n [[1, np.nan], [2, np.nan]],\n columns=CategoricalIndex(\n intervals, categories=intervals + [Interval(5, 6)]\n ),\n )\n mask = DataFrame(\n [[False, False], [False, False]], columns=data.columns, dtype=bool\n )\n result = mask | isnull(data)\n expected = DataFrame(\n [[False, True], [False, True]],\n columns=CategoricalIndex(\n intervals, categories=intervals + [Interval(5, 6)]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_int_dtype_different_index_not_bool(self):\n # GH 52500\n df1 = DataFrame([1, 2, 3], index=[10, 11, 23], columns=["a"])\n df2 = DataFrame([10, 20, 30], index=[11, 10, 23], columns=["a"])\n result = np.bitwise_xor(df1, df2)\n expected = DataFrame([21, 8, 29], index=[10, 11, 23], columns=["a"])\n tm.assert_frame_equal(result, expected)\n\n result = df1 ^ df2\n tm.assert_frame_equal(result, expected)\n\n def test_different_dtypes_different_index_raises(self):\n # GH 52538\n df1 = DataFrame([1, 2], index=["a", "b"])\n df2 
= DataFrame([3, 4], index=["b", "c"])\n with pytest.raises(TypeError, match="unsupported operand type"):\n df1 & df2\n
.venv\Lib\site-packages\pandas\tests\frame\test_logical_ops.py
test_logical_ops.py
Python
7,305
0.95
0.07907
0.078212
awesome-app
750
2023-07-20T22:48:33.909669
GPL-3.0
true
fc438b9412c83b2d364460736e1ba6f2
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameNonuniqueIndexes:\n def test_setattr_columns_vs_construct_with_columns(self):\n # assignment\n # GH 3687\n arr = np.random.default_rng(2).standard_normal((3, 2))\n idx = list(range(2))\n df = DataFrame(arr, columns=["A", "A"])\n df.columns = idx\n expected = DataFrame(arr, columns=idx)\n tm.assert_frame_equal(df, expected)\n\n def test_setattr_columns_vs_construct_with_columns_datetimeindx(self):\n idx = date_range("20130101", periods=4, freq="QE-NOV")\n df = DataFrame(\n [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]\n )\n df.columns = idx\n expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)\n tm.assert_frame_equal(df, expected)\n\n def test_insert_with_duplicate_columns(self):\n # insert\n df = DataFrame(\n [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],\n columns=["foo", "bar", "foo", "hello"],\n )\n df["string"] = "bah"\n expected = DataFrame(\n [[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],\n columns=["foo", "bar", "foo", "hello", "string"],\n )\n tm.assert_frame_equal(df, expected)\n with pytest.raises(ValueError, match="Length of value"):\n df.insert(0, "AnotherColumn", range(len(df.index) - 1))\n\n # insert same dtype\n df["foo2"] = 3\n expected = DataFrame(\n [[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],\n columns=["foo", "bar", "foo", "hello", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n # set (non-dup)\n df["foo2"] = 4\n expected = DataFrame(\n [[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],\n columns=["foo", "bar", "foo", "hello", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n df["foo2"] = 3\n\n # delete (non dup)\n del df["bar"]\n expected = DataFrame(\n [[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],\n 
columns=["foo", "foo", "hello", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n # try to delete again (its not consolidated)\n del df["hello"]\n expected = DataFrame(\n [[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],\n columns=["foo", "foo", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n # consolidate\n df = df._consolidate()\n expected = DataFrame(\n [[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],\n columns=["foo", "foo", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n # insert\n df.insert(2, "new_col", 5.0)\n expected = DataFrame(\n [[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],\n columns=["foo", "foo", "new_col", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n # insert a dup\n with pytest.raises(ValueError, match="cannot insert"):\n df.insert(2, "new_col", 4.0)\n\n df.insert(2, "new_col", 4.0, allow_duplicates=True)\n expected = DataFrame(\n [\n [1, 1, 4.0, 5.0, "bah", 3],\n [1, 2, 4.0, 5.0, "bah", 3],\n [2, 3, 4.0, 5.0, "bah", 3],\n ],\n columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n # delete (dup)\n del df["foo"]\n expected = DataFrame(\n [[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],\n columns=["new_col", "new_col", "string", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n def test_dup_across_dtypes(self):\n # dup across dtypes\n df = DataFrame(\n [[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],\n columns=["foo", "bar", "foo", "hello"],\n )\n\n df["foo2"] = 7.0\n expected = DataFrame(\n [[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],\n columns=["foo", "bar", "foo", "hello", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n result = df["foo"]\n expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])\n tm.assert_frame_equal(result, expected)\n\n # multiple replacements\n df["foo"] = "string"\n expected = DataFrame(\n [\n 
["string", 1, "string", 5, 7.0],\n ["string", 1, "string", 5, 7.0],\n ["string", 1, "string", 5, 7.0],\n ],\n columns=["foo", "bar", "foo", "hello", "foo2"],\n )\n tm.assert_frame_equal(df, expected)\n\n del df["foo"]\n expected = DataFrame(\n [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]\n )\n tm.assert_frame_equal(df, expected)\n\n def test_column_dups_indexes(self):\n # check column dups with index equal and not equal to df's index\n df = DataFrame(\n np.random.default_rng(2).standard_normal((5, 3)),\n index=["a", "b", "c", "d", "e"],\n columns=["A", "B", "A"],\n )\n for index in [df.index, pd.Index(list("edcba"))]:\n this_df = df.copy()\n expected_ser = Series(index.values, index=this_df.index)\n expected_df = DataFrame(\n {"A": expected_ser, "B": this_df["B"]},\n columns=["A", "B", "A"],\n )\n this_df["A"] = index\n tm.assert_frame_equal(this_df, expected_df)\n\n def test_changing_dtypes_with_duplicate_columns(self):\n # multiple assignments that change dtypes\n # the location indexer is a slice\n # GH 6120\n df = DataFrame(\n np.random.default_rng(2).standard_normal((5, 2)), columns=["that", "that"]\n )\n expected = DataFrame(1.0, index=range(5), columns=["that", "that"])\n\n df["that"] = 1.0\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame(\n np.random.default_rng(2).random((5, 2)), columns=["that", "that"]\n )\n expected = DataFrame(1, index=range(5), columns=["that", "that"])\n\n df["that"] = 1\n tm.assert_frame_equal(df, expected)\n\n def test_dup_columns_comparisons(self):\n # equality\n df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])\n df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])\n\n # not-comparing like-labelled\n msg = (\n r"Can only compare identically-labeled \(both index and columns\) "\n "DataFrame objects"\n )\n with pytest.raises(ValueError, match=msg):\n df1 == df2\n\n df1r = df1.reindex_like(df2)\n result = df1r == df2\n expected = DataFrame(\n 
[[False, True], [True, False], [False, False], [True, False]],\n columns=["A", "A"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_mixed_column_selection(self):\n # mixed column selection\n # GH 5639\n dfbool = DataFrame(\n {\n "one": Series([True, True, False], index=["a", "b", "c"]),\n "two": Series([False, False, True, False], index=["a", "b", "c", "d"]),\n "three": Series([False, True, True, True], index=["a", "b", "c", "d"]),\n }\n )\n expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)\n result = dfbool[["one", "three", "one"]]\n tm.assert_frame_equal(result, expected)\n\n def test_multi_axis_dups(self):\n # multi-axis dups\n # GH 6121\n df = DataFrame(\n np.arange(25.0).reshape(5, 5),\n index=["a", "b", "c", "d", "e"],\n columns=["A", "B", "C", "D", "E"],\n )\n z = df[["A", "C", "A"]].copy()\n expected = z.loc[["a", "c", "a"]]\n\n df = DataFrame(\n np.arange(25.0).reshape(5, 5),\n index=["a", "b", "c", "d", "e"],\n columns=["A", "B", "C", "D", "E"],\n )\n z = df[["A", "C", "A"]]\n result = z.loc[["a", "c", "a"]]\n tm.assert_frame_equal(result, expected)\n\n def test_columns_with_dups(self):\n # GH 3468 related\n\n # basic\n df = DataFrame([[1, 2]], columns=["a", "a"])\n df.columns = ["a", "a.1"]\n expected = DataFrame([[1, 2]], columns=["a", "a.1"])\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"])\n df.columns = ["b", "a", "a.1"]\n expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"])\n tm.assert_frame_equal(df, expected)\n\n def test_columns_with_dup_index(self):\n # with a dup index\n df = DataFrame([[1, 2]], columns=["a", "a"])\n df.columns = ["b", "b"]\n expected = DataFrame([[1, 2]], columns=["b", "b"])\n tm.assert_frame_equal(df, expected)\n\n def test_multi_dtype(self):\n # multi-dtype\n df = DataFrame(\n [[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]],\n columns=["a", "a", "b", "b", "d", "c", "c"],\n )\n df.columns = list("ABCDEFG")\n expected = DataFrame(\n [[1, 2, 
1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG")\n )\n tm.assert_frame_equal(df, expected)\n\n def test_multi_dtype2(self):\n df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"])\n df.columns = ["a", "a.1", "a.2", "a.3"]\n expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"])\n tm.assert_frame_equal(df, expected)\n\n def test_dups_across_blocks(self, using_array_manager):\n # dups across blocks\n df_float = DataFrame(\n np.random.default_rng(2).standard_normal((10, 3)), dtype="float64"\n )\n df_int = DataFrame(\n np.random.default_rng(2).standard_normal((10, 3)).astype("int64")\n )\n df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns)\n df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns)\n df_dt = DataFrame(\n pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns\n )\n df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)\n\n if not using_array_manager:\n assert len(df._mgr.blknos) == len(df.columns)\n assert len(df._mgr.blklocs) == len(df.columns)\n\n # testing iloc\n for i in range(len(df.columns)):\n df.iloc[:, i]\n\n def test_dup_columns_across_dtype(self):\n # dup columns across dtype GH 2079/2194\n vals = [[1, -1, 2.0], [2, -2, 3.0]]\n rs = DataFrame(vals, columns=["A", "A", "B"])\n xp = DataFrame(vals)\n xp.columns = ["A", "A", "B"]\n tm.assert_frame_equal(rs, xp)\n\n def test_set_value_by_index(self):\n # See gh-12344\n warn = None\n msg = "will attempt to set the values inplace"\n\n df = DataFrame(np.arange(9).reshape(3, 3).T)\n df.columns = list("AAA")\n expected = df.iloc[:, 2].copy()\n\n with tm.assert_produces_warning(warn, match=msg):\n df.iloc[:, 0] = 3\n tm.assert_series_equal(df.iloc[:, 2], expected)\n\n df = DataFrame(np.arange(9).reshape(3, 3).T)\n df.columns = [2, float(2), str(2)]\n expected = df.iloc[:, 1].copy()\n\n with tm.assert_produces_warning(warn, match=msg):\n df.iloc[:, 0] = 3\n 
tm.assert_series_equal(df.iloc[:, 1], expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_nonunique_indexes.py
test_nonunique_indexes.py
Python
11,937
0.95
0.062315
0.106164
react-lib
833
2023-09-26T06:01:31.655192
MIT
true
a424a18def4465a4508494c598d2a95e
"""\nTests for np.foo applied to DataFrame, not necessarily ufuncs.\n"""\nimport numpy as np\n\nfrom pandas import (\n Categorical,\n DataFrame,\n)\nimport pandas._testing as tm\n\n\nclass TestAsArray:\n def test_asarray_homogeneous(self):\n df = DataFrame({"A": Categorical([1, 2]), "B": Categorical([1, 2])})\n result = np.asarray(df)\n # may change from object in the future\n expected = np.array([[1, 1], [2, 2]], dtype="object")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_np_sqrt(self, float_frame):\n with np.errstate(all="ignore"):\n result = np.sqrt(float_frame)\n assert isinstance(result, type(float_frame))\n assert result.index.is_(float_frame.index)\n assert result.columns.is_(float_frame.columns)\n\n tm.assert_frame_equal(result, float_frame.apply(np.sqrt))\n\n def test_sum_deprecated_axis_behavior(self):\n # GH#52042 deprecated behavior of df.sum(axis=None), which gets\n # called when we do np.sum(df)\n\n arr = np.random.default_rng(2).standard_normal((4, 3))\n df = DataFrame(arr)\n\n msg = "The behavior of DataFrame.sum with axis=None is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=msg, check_stacklevel=False\n ):\n res = np.sum(df)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = df.sum(axis=None)\n tm.assert_series_equal(res, expected)\n\n def test_np_ravel(self):\n # GH26247\n arr = np.array(\n [\n [0.11197053, 0.44361564, -0.92589452],\n [0.05883648, -0.00948922, -0.26469934],\n ]\n )\n\n result = np.ravel([DataFrame(batch.reshape(1, 3)) for batch in arr])\n expected = np.array(\n [\n 0.11197053,\n 0.44361564,\n -0.92589452,\n 0.05883648,\n -0.00948922,\n -0.26469934,\n ]\n )\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.ravel(DataFrame(arr[0].reshape(1, 3), columns=["x1", "x2", "x3"]))\n expected = np.array([0.11197053, 0.44361564, -0.92589452])\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.ravel(\n [\n DataFrame(batch.reshape(1, 3), columns=["x1", 
"x2", "x3"])\n for batch in arr\n ]\n )\n expected = np.array(\n [\n 0.11197053,\n 0.44361564,\n -0.92589452,\n 0.05883648,\n -0.00948922,\n -0.26469934,\n ]\n )\n tm.assert_numpy_array_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_npfuncs.py
test_npfuncs.py
Python
2,751
0.95
0.089888
0.052632
python-kit
854
2025-04-11T07:04:13.207761
BSD-3-Clause
true
b7e5796ebdcdefa812d0ba259165318e
import operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import (\n NumExprClobberingError,\n UndefinedVariableError,\n)\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.computation.check import NUMEXPR_INSTALLED\n\n\n@pytest.fixture(params=["python", "pandas"], ids=lambda x: x)\ndef parser(request):\n return request.param\n\n\n@pytest.fixture(\n params=["python", pytest.param("numexpr", marks=td.skip_if_no("numexpr"))],\n ids=lambda x: x,\n)\ndef engine(request):\n return request.param\n\n\ndef skip_if_no_pandas_parser(parser):\n if parser != "pandas":\n pytest.skip(f"cannot evaluate with parser={parser}")\n\n\nclass TestCompat:\n @pytest.fixture\n def df(self):\n return DataFrame({"A": [1, 2, 3]})\n\n @pytest.fixture\n def expected1(self, df):\n return df[df.A > 0]\n\n @pytest.fixture\n def expected2(self, df):\n return df.A + 1\n\n def test_query_default(self, df, expected1, expected2):\n # GH 12749\n # this should always work, whether NUMEXPR_INSTALLED or not\n result = df.query("A>0")\n tm.assert_frame_equal(result, expected1)\n result = df.eval("A+1")\n tm.assert_series_equal(result, expected2, check_names=False)\n\n def test_query_None(self, df, expected1, expected2):\n result = df.query("A>0", engine=None)\n tm.assert_frame_equal(result, expected1)\n result = df.eval("A+1", engine=None)\n tm.assert_series_equal(result, expected2, check_names=False)\n\n def test_query_python(self, df, expected1, expected2):\n result = df.query("A>0", engine="python")\n tm.assert_frame_equal(result, expected1)\n result = df.eval("A+1", engine="python")\n tm.assert_series_equal(result, expected2, check_names=False)\n\n def test_query_numexpr(self, df, expected1, expected2):\n if NUMEXPR_INSTALLED:\n result = df.query("A>0", engine="numexpr")\n tm.assert_frame_equal(result, expected1)\n result = df.eval("A+1", 
engine="numexpr")\n tm.assert_series_equal(result, expected2, check_names=False)\n else:\n msg = (\n r"'numexpr' is not installed or an unsupported version. "\n r"Cannot use engine='numexpr' for query/eval if 'numexpr' is "\n r"not installed"\n )\n with pytest.raises(ImportError, match=msg):\n df.query("A>0", engine="numexpr")\n with pytest.raises(ImportError, match=msg):\n df.eval("A+1", engine="numexpr")\n\n\nclass TestDataFrameEval:\n # smaller hits python, larger hits numexpr\n @pytest.mark.parametrize("n", [4, 4000])\n @pytest.mark.parametrize(\n "op_str,op,rop",\n [\n ("+", "__add__", "__radd__"),\n ("-", "__sub__", "__rsub__"),\n ("*", "__mul__", "__rmul__"),\n ("/", "__truediv__", "__rtruediv__"),\n ],\n )\n def test_ops(self, op_str, op, rop, n):\n # tst ops and reversed ops in evaluation\n # GH7198\n\n df = DataFrame(1, index=range(n), columns=list("abcd"))\n df.iloc[0] = 2\n m = df.mean()\n\n base = DataFrame( # noqa: F841\n np.tile(m.values, n).reshape(n, -1), columns=list("abcd")\n )\n\n expected = eval(f"base {op_str} df")\n\n # ops as strings\n result = eval(f"m {op_str} df")\n tm.assert_frame_equal(result, expected)\n\n # these are commutative\n if op in ["+", "*"]:\n result = getattr(df, op)(m)\n tm.assert_frame_equal(result, expected)\n\n # these are not\n elif op in ["-", "/"]:\n result = getattr(df, rop)(m)\n tm.assert_frame_equal(result, expected)\n\n def test_dataframe_sub_numexpr_path(self):\n # GH7192: Note we need a large number of rows to ensure this\n # goes through the numexpr path\n df = DataFrame({"A": np.random.default_rng(2).standard_normal(25000)})\n df.iloc[0:5] = np.nan\n expected = 1 - np.isnan(df.iloc[0:25])\n result = (1 - np.isnan(df)).iloc[0:25]\n tm.assert_frame_equal(result, expected)\n\n def test_query_non_str(self):\n # GH 11485\n df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})\n\n msg = "expr must be a string to be evaluated"\n with pytest.raises(ValueError, match=msg):\n df.query(lambda x: x.B == "b")\n\n with 
pytest.raises(ValueError, match=msg):\n df.query(111)\n\n def test_query_empty_string(self):\n # GH 13139\n df = DataFrame({"A": [1, 2, 3]})\n\n msg = "expr cannot be an empty string"\n with pytest.raises(ValueError, match=msg):\n df.query("")\n\n def test_eval_resolvers_as_list(self):\n # GH 14095\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab")\n )\n dict1 = {"a": 1}\n dict2 = {"b": 2}\n assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]\n assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]\n\n def test_eval_resolvers_combined(self):\n # GH 34966\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab")\n )\n dict1 = {"c": 2}\n\n # Both input and default index/column resolvers should be usable\n result = df.eval("a + b * c", resolvers=[dict1])\n\n expected = df["a"] + df["b"] * dict1["c"]\n tm.assert_series_equal(result, expected)\n\n def test_eval_object_dtype_binop(self):\n # GH#24883\n df = DataFrame({"a1": ["Y", "N"]})\n res = df.eval("c = ((a1 == 'Y') & True)")\n expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]})\n tm.assert_frame_equal(res, expected)\n\n def test_extension_array_eval(self, engine, parser, request):\n # GH#58748\n if engine == "numexpr":\n mark = pytest.mark.xfail(\n reason="numexpr does not support extension array dtypes"\n )\n request.applymarker(mark)\n df = DataFrame({"a": pd.array([1, 2, 3]), "b": pd.array([4, 5, 6])})\n result = df.eval("a / b", engine=engine, parser=parser)\n expected = Series(pd.array([0.25, 0.40, 0.50]))\n tm.assert_series_equal(result, expected)\n\n def test_complex_eval(self, engine, parser):\n # GH#21374\n df = DataFrame({"a": [1 + 2j], "b": [1 + 1j]})\n result = df.eval("a/b", engine=engine, parser=parser)\n expected = Series([1.5 + 0.5j])\n tm.assert_series_equal(result, expected)\n\n\nclass TestDataFrameQueryWithMultiIndex:\n def test_query_with_named_multiindex(self, parser, 
engine):\n skip_if_no_pandas_parser(parser)\n a = np.random.default_rng(2).choice(["red", "green"], size=10)\n b = np.random.default_rng(2).choice(["eggs", "ham"], size=10)\n index = MultiIndex.from_arrays([a, b], names=["color", "food"])\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index)\n ind = Series(\n df.index.get_level_values("color").values, index=index, name="color"\n )\n\n # equality\n res1 = df.query('color == "red"', parser=parser, engine=engine)\n res2 = df.query('"red" == color', parser=parser, engine=engine)\n exp = df[ind == "red"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('color != "red"', parser=parser, engine=engine)\n res2 = df.query('"red" != color', parser=parser, engine=engine)\n exp = df[ind != "red"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('color == ["red"]', parser=parser, engine=engine)\n res2 = df.query('["red"] == color', parser=parser, engine=engine)\n exp = df[ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('color != ["red"]', parser=parser, engine=engine)\n res2 = df.query('["red"] != color', parser=parser, engine=engine)\n exp = df[~ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('["red"] in color', parser=parser, engine=engine)\n res2 = df.query('"red" in color', parser=parser, engine=engine)\n exp = df[ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('["red"] not in color', parser=parser, engine=engine)\n res2 = df.query('"red" not in color', parser=parser, engine=engine)\n exp = df[~ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n def test_query_with_unnamed_multiindex(self, parser, engine):\n 
skip_if_no_pandas_parser(parser)\n a = np.random.default_rng(2).choice(["red", "green"], size=10)\n b = np.random.default_rng(2).choice(["eggs", "ham"], size=10)\n index = MultiIndex.from_arrays([a, b])\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index)\n ind = Series(df.index.get_level_values(0).values, index=index)\n\n res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)\n res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)\n exp = df[ind == "red"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)\n res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)\n exp = df[ind != "red"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)\n res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)\n res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(["red"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # ## LEVEL 1\n ind = Series(df.index.get_level_values(1).values, index=index)\n res1 = df.query('ilevel_1 == 
"eggs"', parser=parser, engine=engine)\n res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)\n exp = df[ind == "eggs"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)\n res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)\n exp = df[ind != "eggs"]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)\n res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(["eggs"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)\n res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(["eggs"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(["eggs"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(["eggs"])]\n tm.assert_frame_equal(res1, exp)\n tm.assert_frame_equal(res2, exp)\n\n def test_query_with_partially_named_multiindex(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n a = np.random.default_rng(2).choice(["red", "green"], size=10)\n b = np.arange(10)\n index = MultiIndex.from_arrays([a, b])\n index.names = [None, "rating"]\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index)\n res = df.query("rating == 1", parser=parser, engine=engine)\n ind = Series(\n df.index.get_level_values("rating").values, 
index=index, name="rating"\n )\n exp = df[ind == 1]\n tm.assert_frame_equal(res, exp)\n\n res = df.query("rating != 1", parser=parser, engine=engine)\n ind = Series(\n df.index.get_level_values("rating").values, index=index, name="rating"\n )\n exp = df[ind != 1]\n tm.assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind == "red"]\n tm.assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind != "red"]\n tm.assert_frame_equal(res, exp)\n\n def test_query_multiindex_get_index_resolvers(self):\n df = DataFrame(\n np.ones((10, 3)),\n index=MultiIndex.from_arrays(\n [range(10) for _ in range(2)], names=["spam", "eggs"]\n ),\n )\n resolvers = df._get_index_resolvers()\n\n def to_series(mi, level):\n level_values = mi.get_level_values(level)\n s = level_values.to_series()\n s.index = mi\n return s\n\n col_series = df.columns.to_series()\n expected = {\n "index": df.index,\n "columns": col_series,\n "spam": to_series(df.index, "spam"),\n "eggs": to_series(df.index, "eggs"),\n "clevel_0": col_series,\n }\n for k, v in resolvers.items():\n if isinstance(v, Index):\n assert v.is_(expected[k])\n elif isinstance(v, Series):\n tm.assert_series_equal(v, expected[k])\n else:\n raise AssertionError("object must be a Series or Index")\n\n\n@td.skip_if_no("numexpr")\nclass TestDataFrameQueryNumExprPandas:\n @pytest.fixture\n def engine(self):\n return "numexpr"\n\n @pytest.fixture\n def parser(self):\n return "pandas"\n\n def test_date_query_with_attribute_access(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n df["dates1"] = date_range("1/1/2012", periods=5)\n df["dates2"] = date_range("1/1/2013", periods=5)\n df["dates3"] = date_range("1/1/2014", periods=5)\n res 
= df.query(\n "@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser\n )\n expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_no_attribute_access(self, engine, parser):\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n df["dates1"] = date_range("1/1/2012", periods=5)\n df["dates2"] = date_range("1/1/2013", periods=5)\n df["dates3"] = date_range("1/1/2014", periods=5)\n res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)\n expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self, engine, parser):\n n = 10\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3)))\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates2"] = date_range("1/1/2013", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT\n df.loc[np.random.default_rng(2).random(n) > 0.5, "dates3"] = pd.NaT\n res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)\n expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query(self, engine, parser):\n n = 10\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3)))\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n return_value = df.set_index("dates1", inplace=True, drop=True)\n assert return_value is None\n res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)\n expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self, engine, parser):\n n = 10\n # Cast to object to avoid implicit cast when setting entry to pd.NaT below\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))).astype(\n {0: object}\n 
)\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n df.iloc[0, 0] = pd.NaT\n return_value = df.set_index("dates1", inplace=True, drop=True)\n assert return_value is None\n res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)\n expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self, engine, parser):\n n = 10\n d = {}\n d["dates1"] = date_range("1/1/2012", periods=n)\n d["dates3"] = date_range("1/1/2014", periods=n)\n df = DataFrame(d)\n df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT\n return_value = df.set_index("dates1", inplace=True, drop=True)\n assert return_value is None\n res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)\n expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_non_date(self, engine, parser):\n n = 10\n df = DataFrame(\n {"dates": date_range("1/1/2012", periods=n), "nondate": np.arange(n)}\n )\n\n result = df.query("dates == nondate", parser=parser, engine=engine)\n assert len(result) == 0\n\n result = df.query("dates != nondate", parser=parser, engine=engine)\n tm.assert_frame_equal(result, df)\n\n msg = r"Invalid comparison between dtype=datetime64\[ns\] and ndarray"\n for op in ["<", ">", "<=", ">="]:\n with pytest.raises(TypeError, match=msg):\n df.query(f"dates {op} nondate", parser=parser, engine=engine)\n\n def test_query_syntax_error(self, engine, parser):\n df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)})\n msg = "invalid syntax"\n with pytest.raises(SyntaxError, match=msg):\n df.query("i - +", engine=engine, parser=parser)\n\n def test_query_scope(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(\n np.random.default_rng(2).standard_normal((20, 2)), columns=list("ab")\n )\n\n a, b = 1, 2 
# noqa: F841\n res = df.query("a > b", engine=engine, parser=parser)\n expected = df[df.a > df.b]\n tm.assert_frame_equal(res, expected)\n\n res = df.query("@a > b", engine=engine, parser=parser)\n expected = df[a > df.b]\n tm.assert_frame_equal(res, expected)\n\n # no local variable c\n with pytest.raises(\n UndefinedVariableError, match="local variable 'c' is not defined"\n ):\n df.query("@a > b > @c", engine=engine, parser=parser)\n\n # no column named 'c'\n with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"):\n df.query("@a > b > c", engine=engine, parser=parser)\n\n def test_query_doesnt_pickup_local(self, engine, parser):\n n = m = 10\n df = DataFrame(\n np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc")\n )\n\n # we don't pick up the local 'sin'\n with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"):\n df.query("sin > 5", engine=engine, parser=parser)\n\n def test_query_builtin(self, engine, parser):\n n = m = 10\n df = DataFrame(\n np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc")\n )\n\n df.index.name = "sin"\n msg = "Variables in expression.+"\n with pytest.raises(NumExprClobberingError, match=msg):\n df.query("sin > 5", engine=engine, parser=parser)\n\n def test_query(self, engine, parser):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"]\n )\n\n tm.assert_frame_equal(\n df.query("a < b", engine=engine, parser=parser), df[df.a < df.b]\n )\n tm.assert_frame_equal(\n df.query("a + b > b * c", engine=engine, parser=parser),\n df[df.a + df.b > df.b * df.c],\n )\n\n def test_query_index_with_name(self, engine, parser):\n df = DataFrame(\n np.random.default_rng(2).integers(10, size=(10, 3)),\n index=Index(range(10), name="blob"),\n columns=["a", "b", "c"],\n )\n res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser)\n expec = df[(df.index < 5) & (df.a < df.b)]\n tm.assert_frame_equal(res, expec)\n\n res = 
df.query("blob < b", engine=engine, parser=parser)\n expec = df[df.index < df.b]\n\n tm.assert_frame_equal(res, expec)\n\n def test_query_index_without_name(self, engine, parser):\n df = DataFrame(\n np.random.default_rng(2).integers(10, size=(10, 3)),\n index=range(10),\n columns=["a", "b", "c"],\n )\n\n # "index" should refer to the index\n res = df.query("index < b", engine=engine, parser=parser)\n expec = df[df.index < df.b]\n tm.assert_frame_equal(res, expec)\n\n # test against a scalar\n res = df.query("index < 5", engine=engine, parser=parser)\n expec = df[df.index < 5]\n tm.assert_frame_equal(res, expec)\n\n def test_nested_scope(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n expected = df[(df > 0) & (df2 > 0)]\n\n result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n result = pd.eval(\n "df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser\n )\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n tm.assert_frame_equal(result, expected)\n\n result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser)\n expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n def test_nested_raises_on_local_self_reference(self, engine, parser):\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n\n # can't reference ourself b/c we're a local so @ is necessary\n with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):\n df.query("df > 0", engine=engine, parser=parser)\n\n def test_local_syntax(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(\n 
np.random.default_rng(2).standard_normal((100, 10)),\n columns=list("abcdefghij"),\n )\n b = 1\n expect = df[df.a < b]\n result = df.query("a < @b", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expect)\n\n expect = df[df.a < df.b]\n result = df.query("a < b", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expect)\n\n def test_chained_cmp_and_in(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n cols = list("abc")\n df = DataFrame(\n np.random.default_rng(2).standard_normal((100, len(cols))), columns=cols\n )\n res = df.query(\n "a < b < c and a not in b not in c", engine=engine, parser=parser\n )\n ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)\n expec = df[ind]\n tm.assert_frame_equal(res, expec)\n\n def test_local_variable_with_in(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n a = Series(np.random.default_rng(2).integers(3, size=15), name="a")\n b = Series(np.random.default_rng(2).integers(10, size=15), name="b")\n df = DataFrame({"a": a, "b": b})\n\n expected = df.loc[(df.b - 1).isin(a)]\n result = df.query("b - 1 in a", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n b = Series(np.random.default_rng(2).integers(10, size=15), name="b")\n expected = df.loc[(b - 1).isin(a)]\n result = df.query("@b - 1 in a", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n def test_at_inside_string(self, engine, parser):\n skip_if_no_pandas_parser(parser)\n c = 1 # noqa: F841\n df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})\n result = df.query('a == "@c"', engine=engine, parser=parser)\n expected = df[df.a == "@c"]\n tm.assert_frame_equal(result, expected)\n\n def test_query_undefined_local(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=list("ab"))\n with pytest.raises(\n UndefinedVariableError, match="local variable 'c' is not 
defined"\n ):\n df.query("a == @c", engine=engine, parser=parser)\n\n def test_index_resolvers_come_after_columns_with_the_same_name(\n self, engine, parser\n ):\n n = 1 # noqa: F841\n a = np.r_[20:101:20]\n\n df = DataFrame(\n {"index": a, "b": np.random.default_rng(2).standard_normal(a.size)}\n )\n df.index.name = "index"\n result = df.query("index > 5", engine=engine, parser=parser)\n expected = df[df["index"] > 5]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(\n {"index": a, "b": np.random.default_rng(2).standard_normal(a.size)}\n )\n result = df.query("ilevel_0 > 5", engine=engine, parser=parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({"a": a, "b": np.random.default_rng(2).standard_normal(a.size)})\n df.index.name = "a"\n result = df.query("a > 5", engine=engine, parser=parser)\n expected = df[df.a > 5]\n tm.assert_frame_equal(result, expected)\n\n result = df.query("index > 5", engine=engine, parser=parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("op, f", [["==", operator.eq], ["!=", operator.ne]])\n def test_inf(self, op, f, engine, parser):\n n = 10\n df = DataFrame(\n {\n "a": np.random.default_rng(2).random(n),\n "b": np.random.default_rng(2).random(n),\n }\n )\n df.loc[::2, 0] = np.inf\n q = f"a {op} inf"\n expected = df[f(df.a, np.inf)]\n result = df.query(q, engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n def test_check_tz_aware_index_query(self, tz_aware_fixture):\n # https://github.com/pandas-dev/pandas/issues/29463\n tz = tz_aware_fixture\n df_index = date_range(\n start="2019-01-01", freq="1d", periods=10, tz=tz, name="time"\n )\n expected = DataFrame(index=df_index)\n df = DataFrame(index=df_index)\n result = df.query('"2018-01-03 00:00:00+00" < time')\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(df_index)\n expected.columns = 
expected.columns.astype(object)\n result = df.reset_index().query('"2018-01-03 00:00:00+00" < time')\n tm.assert_frame_equal(result, expected)\n\n def test_method_calls_in_query(self, engine, parser):\n # https://github.com/pandas-dev/pandas/issues/22435\n n = 10\n df = DataFrame(\n {\n "a": 2 * np.random.default_rng(2).random(n),\n "b": np.random.default_rng(2).random(n),\n }\n )\n expected = df[df["a"].astype("int") == 0]\n result = df.query("a.astype('int') == 0", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(\n {\n "a": np.where(\n np.random.default_rng(2).random(n) < 0.5,\n np.nan,\n np.random.default_rng(2).standard_normal(n),\n ),\n "b": np.random.default_rng(2).standard_normal(n),\n }\n )\n expected = df[df["a"].notnull()]\n result = df.query("a.notnull()", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n\n@td.skip_if_no("numexpr")\nclass TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):\n @pytest.fixture\n def engine(self):\n return "numexpr"\n\n @pytest.fixture\n def parser(self):\n return "python"\n\n def test_date_query_no_attribute_access(self, engine, parser):\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n df["dates1"] = date_range("1/1/2012", periods=5)\n df["dates2"] = date_range("1/1/2013", periods=5)\n df["dates3"] = date_range("1/1/2014", periods=5)\n res = df.query(\n "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser\n )\n expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self, engine, parser):\n n = 10\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3)))\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates2"] = date_range("1/1/2013", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT\n df.loc[np.random.default_rng(2).random(n) > 
0.5, "dates3"] = pd.NaT\n res = df.query(\n "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser\n )\n expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query(self, engine, parser):\n n = 10\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3)))\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n return_value = df.set_index("dates1", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\n "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser\n )\n expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self, engine, parser):\n n = 10\n # Cast to object to avoid implicit cast when setting entry to pd.NaT below\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))).astype(\n {0: object}\n )\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n df.iloc[0, 0] = pd.NaT\n return_value = df.set_index("dates1", inplace=True, drop=True)\n assert return_value is None\n res = df.query(\n "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser\n )\n expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self, engine, parser):\n n = 10\n df = DataFrame(np.random.default_rng(2).standard_normal((n, 3)))\n df["dates1"] = date_range("1/1/2012", periods=n)\n df["dates3"] = date_range("1/1/2014", periods=n)\n df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT\n return_value = df.set_index("dates1", inplace=True, drop=True)\n assert return_value is None\n msg = r"'BoolOp' nodes are not implemented"\n with pytest.raises(NotImplementedError, match=msg):\n df.query("index < 20130101 < dates3", engine=engine, parser=parser)\n\n 
def test_nested_scope(self, engine, parser):\n # smoke test\n x = 1 # noqa: F841\n result = pd.eval("x + 1", engine=engine, parser=parser)\n assert result == 2\n\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))\n\n # don't have the pandas parser\n msg = r"The '@' prefix is only supported by the pandas parser"\n with pytest.raises(SyntaxError, match=msg):\n df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)\n\n with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):\n df.query("(df>0) & (df2>0)", engine=engine, parser=parser)\n\n expected = df[(df > 0) & (df2 > 0)]\n result = pd.eval("df[(df > 0) & (df2 > 0)]", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n result = pd.eval(\n "df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]", engine=engine, parser=parser\n )\n tm.assert_frame_equal(expected, result)\n\n def test_query_numexpr_with_min_and_max_columns(self):\n df = DataFrame({"min": [1, 2, 3], "max": [4, 5, 6]})\n regex_to_match = (\n r"Variables in expression \"\(min\) == \(1\)\" "\n r"overlap with builtins: \('min'\)"\n )\n with pytest.raises(NumExprClobberingError, match=regex_to_match):\n df.query("min == 1")\n\n regex_to_match = (\n r"Variables in expression \"\(max\) == \(1\)\" "\n r"overlap with builtins: \('max'\)"\n )\n with pytest.raises(NumExprClobberingError, match=regex_to_match):\n df.query("max == 1")\n\n\nclass TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):\n @pytest.fixture\n def engine(self):\n return "python"\n\n @pytest.fixture\n def parser(self):\n return "pandas"\n\n def test_query_builtin(self, engine, parser):\n n = m = 10\n df = DataFrame(\n np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc")\n )\n\n df.index.name = "sin"\n expected = df[df.index > 5]\n result = df.query("sin > 5", engine=engine, 
parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):\n @pytest.fixture\n def engine(self):\n return "python"\n\n @pytest.fixture\n def parser(self):\n return "python"\n\n def test_query_builtin(self, engine, parser):\n n = m = 10\n df = DataFrame(\n np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc")\n )\n\n df.index.name = "sin"\n expected = df[df.index > 5]\n result = df.query("sin > 5", engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryStrings:\n def test_str_query_method(self, parser, engine):\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"])\n df["strings"] = Series(list("aabbccddee"))\n expect = df[df.strings == "a"]\n\n if parser != "pandas":\n col = "strings"\n lst = '"a"'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = "==", "!="\n ops = 2 * ([eq] + [ne])\n msg = r"'(Not)?In' nodes are not implemented"\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = f"{lhs} {op} {rhs}"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(\n ex,\n engine=engine,\n parser=parser,\n local_dict={"strings": df.strings},\n )\n else:\n res = df.query('"a" == strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('strings == "a"', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n tm.assert_frame_equal(res, df[df.strings.isin(["a"])])\n\n expect = df[df.strings != "a"]\n res = df.query('strings != "a"', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('"a" != strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n tm.assert_frame_equal(res, df[~df.strings.isin(["a"])])\n\n def test_str_list_query_method(self, parser, engine):\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"])\n df["strings"] = Series(list("aabbccddee"))\n 
expect = df[df.strings.isin(["a", "b"])]\n\n if parser != "pandas":\n col = "strings"\n lst = '["a", "b"]'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = "==", "!="\n ops = 2 * ([eq] + [ne])\n msg = r"'(Not)?In' nodes are not implemented"\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = f"{lhs} {op} {rhs}"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(ex, engine=engine, parser=parser)\n else:\n res = df.query('strings == ["a", "b"]', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('["a", "b"] == strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n expect = df[~df.strings.isin(["a", "b"])]\n\n res = df.query('strings != ["a", "b"]', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n res = df.query('["a", "b"] != strings', engine=engine, parser=parser)\n tm.assert_frame_equal(res, expect)\n\n def test_query_with_string_columns(self, parser, engine):\n df = DataFrame(\n {\n "a": list("aaaabbbbcccc"),\n "b": list("aabbccddeeff"),\n "c": np.random.default_rng(2).integers(5, size=12),\n "d": np.random.default_rng(2).integers(9, size=12),\n }\n )\n if parser == "pandas":\n res = df.query("a in b", parser=parser, engine=engine)\n expec = df[df.a.isin(df.b)]\n tm.assert_frame_equal(res, expec)\n\n res = df.query("a in b and c < d", parser=parser, engine=engine)\n expec = df[df.a.isin(df.b) & (df.c < df.d)]\n tm.assert_frame_equal(res, expec)\n else:\n msg = r"'(Not)?In' nodes are not implemented"\n with pytest.raises(NotImplementedError, match=msg):\n df.query("a in b", parser=parser, engine=engine)\n\n msg = r"'BoolOp' nodes are not implemented"\n with pytest.raises(NotImplementedError, match=msg):\n df.query("a in b and c < d", parser=parser, engine=engine)\n\n def test_object_array_eq_ne(self, parser, engine):\n df = DataFrame(\n {\n "a": list("aaaabbbbcccc"),\n "b": list("aabbccddeeff"),\n "c": np.random.default_rng(2).integers(5, size=12),\n "d": 
np.random.default_rng(2).integers(9, size=12),\n }\n )\n res = df.query("a == b", parser=parser, engine=engine)\n exp = df[df.a == df.b]\n tm.assert_frame_equal(res, exp)\n\n res = df.query("a != b", parser=parser, engine=engine)\n exp = df[df.a != df.b]\n tm.assert_frame_equal(res, exp)\n\n def test_query_with_nested_strings(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n events = [\n f"page {n} {act}" for n in range(1, 4) for act in ["load", "exit"]\n ] * 2\n stamps1 = date_range("2014-01-01 0:00:01", freq="30s", periods=6)\n stamps2 = date_range("2014-02-01 1:00:01", freq="30s", periods=6)\n df = DataFrame(\n {\n "id": np.arange(1, 7).repeat(2),\n "event": events,\n "timestamp": stamps1.append(stamps2),\n }\n )\n\n expected = df[df.event == '"page 1 load"']\n res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine)\n tm.assert_frame_equal(expected, res)\n\n def test_query_with_nested_special_character(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]})\n res = df.query('a == "test & test"', parser=parser, engine=engine)\n expec = df[df.a == "test & test"]\n tm.assert_frame_equal(res, expec)\n\n @pytest.mark.parametrize(\n "op, func",\n [\n ["<", operator.lt],\n [">", operator.gt],\n ["<=", operator.le],\n [">=", operator.ge],\n ],\n )\n def test_query_lex_compare_strings(self, parser, engine, op, func):\n a = Series(np.random.default_rng(2).choice(list("abcde"), 20))\n b = Series(np.arange(a.size))\n df = DataFrame({"X": a, "Y": b})\n\n res = df.query(f'X {op} "d"', engine=engine, parser=parser)\n expected = df[func(df.X, "d")]\n tm.assert_frame_equal(res, expected)\n\n def test_query_single_element_booleans(self, parser, engine):\n columns = "bid", "bidsize", "ask", "asksize"\n data = np.random.default_rng(2).integers(2, size=(1, len(columns))).astype(bool)\n df = DataFrame(data, columns=columns)\n res = df.query("bid & ask", engine=engine, parser=parser)\n 
expected = df[df.bid & df.ask]\n tm.assert_frame_equal(res, expected)\n\n def test_query_string_scalar_variable(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n df = DataFrame(\n {\n "Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"],\n "Price": [109.70, 109.72, 183.30, 183.35],\n }\n )\n e = df[df.Symbol == "BUD US"]\n symb = "BUD US" # noqa: F841\n r = df.query("Symbol == @symb", parser=parser, engine=engine)\n tm.assert_frame_equal(e, r)\n\n @pytest.mark.parametrize(\n "in_list",\n [\n [None, "asdf", "ghjk"],\n ["asdf", None, "ghjk"],\n ["asdf", "ghjk", None],\n [None, None, "asdf"],\n ["asdf", None, None],\n [None, None, None],\n ],\n )\n def test_query_string_null_elements(self, in_list):\n # GITHUB ISSUE #31516\n parser = "pandas"\n engine = "python"\n expected = {i: value for i, value in enumerate(in_list) if value == "asdf"}\n\n df_expected = DataFrame({"a": expected}, dtype="string")\n df_expected.index = df_expected.index.astype("int64")\n df = DataFrame({"a": in_list}, dtype="string")\n res1 = df.query("a == 'asdf'", parser=parser, engine=engine)\n res2 = df[df["a"] == "asdf"]\n res3 = df.query("a <= 'asdf'", parser=parser, engine=engine)\n tm.assert_frame_equal(res1, df_expected)\n tm.assert_frame_equal(res1, res2)\n tm.assert_frame_equal(res1, res3)\n tm.assert_frame_equal(res2, res3)\n\n\nclass TestDataFrameEvalWithFrame:\n @pytest.fixture\n def frame(self):\n return DataFrame(\n np.random.default_rng(2).standard_normal((10, 3)), columns=list("abc")\n )\n\n def test_simple_expr(self, frame, parser, engine):\n res = frame.eval("a + b", engine=engine, parser=parser)\n expect = frame.a + frame.b\n tm.assert_series_equal(res, expect)\n\n def test_bool_arith_expr(self, frame, parser, engine):\n res = frame.eval("a[a < 1] + b", engine=engine, parser=parser)\n expect = frame.a[frame.a < 1] + frame.b\n tm.assert_series_equal(res, expect)\n\n @pytest.mark.parametrize("op", ["+", "-", "*", "/"])\n def test_invalid_type_for_operator_raises(self, 
parser, engine, op):\n df = DataFrame({"a": [1, 2], "b": ["c", "d"]})\n msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'|Cannot"\n\n with pytest.raises(TypeError, match=msg):\n df.eval(f"a {op} b", engine=engine, parser=parser)\n\n\nclass TestDataFrameQueryBacktickQuoting:\n @pytest.fixture\n def df(self):\n """\n Yields a dataframe with strings that may or may not need escaping\n by backticks. The last two columns cannot be escaped by backticks\n and should raise a ValueError.\n """\n yield DataFrame(\n {\n "A": [1, 2, 3],\n "B B": [3, 2, 1],\n "C C": [4, 5, 6],\n "C C": [7, 4, 3],\n "C_C": [8, 9, 10],\n "D_D D": [11, 1, 101],\n "E.E": [6, 3, 5],\n "F-F": [8, 1, 10],\n "1e1": [2, 4, 8],\n "def": [10, 11, 2],\n "A (x)": [4, 1, 3],\n "B(x)": [1, 1, 5],\n "B (x)": [2, 7, 4],\n " &^ :!€$?(} > <++*'' ": [2, 5, 6],\n "": [10, 11, 1],\n " A": [4, 7, 9],\n " ": [1, 2, 1],\n "it's": [6, 3, 1],\n "that's": [9, 1, 8],\n "☺": [8, 7, 6],\n "foo#bar": [2, 4, 5],\n 1: [5, 7, 9],\n }\n )\n\n def test_single_backtick_variable_query(self, df):\n res = df.query("1 < `B B`")\n expect = df[1 < df["B B"]]\n tm.assert_frame_equal(res, expect)\n\n def test_two_backtick_variables_query(self, df):\n res = df.query("1 < `B B` and 4 < `C C`")\n expect = df[(1 < df["B B"]) & (4 < df["C C"])]\n tm.assert_frame_equal(res, expect)\n\n def test_single_backtick_variable_expr(self, df):\n res = df.eval("A + `B B`")\n expect = df["A"] + df["B B"]\n tm.assert_series_equal(res, expect)\n\n def test_two_backtick_variables_expr(self, df):\n res = df.eval("`B B` + `C C`")\n expect = df["B B"] + df["C C"]\n tm.assert_series_equal(res, expect)\n\n def test_already_underscore_variable(self, df):\n res = df.eval("`C_C` + A")\n expect = df["C_C"] + df["A"]\n tm.assert_series_equal(res, expect)\n\n def test_same_name_but_underscores(self, df):\n res = df.eval("C_C + `C C`")\n expect = df["C_C"] + df["C C"]\n tm.assert_series_equal(res, expect)\n\n def test_mixed_underscores_and_spaces(self, df):\n 
res = df.eval("A + `D_D D`")\n expect = df["A"] + df["D_D D"]\n tm.assert_series_equal(res, expect)\n\n def test_backtick_quote_name_with_no_spaces(self, df):\n res = df.eval("A + `C_C`")\n expect = df["A"] + df["C_C"]\n tm.assert_series_equal(res, expect)\n\n def test_special_characters(self, df):\n res = df.eval("`E.E` + `F-F` - A")\n expect = df["E.E"] + df["F-F"] - df["A"]\n tm.assert_series_equal(res, expect)\n\n def test_start_with_digit(self, df):\n res = df.eval("A + `1e1`")\n expect = df["A"] + df["1e1"]\n tm.assert_series_equal(res, expect)\n\n def test_keyword(self, df):\n res = df.eval("A + `def`")\n expect = df["A"] + df["def"]\n tm.assert_series_equal(res, expect)\n\n def test_unneeded_quoting(self, df):\n res = df.query("`A` > 2")\n expect = df[df["A"] > 2]\n tm.assert_frame_equal(res, expect)\n\n def test_parenthesis(self, df):\n res = df.query("`A (x)` > 2")\n expect = df[df["A (x)"] > 2]\n tm.assert_frame_equal(res, expect)\n\n def test_empty_string(self, df):\n res = df.query("`` > 5")\n expect = df[df[""] > 5]\n tm.assert_frame_equal(res, expect)\n\n def test_multiple_spaces(self, df):\n res = df.query("`C C` > 5")\n expect = df[df["C C"] > 5]\n tm.assert_frame_equal(res, expect)\n\n def test_start_with_spaces(self, df):\n res = df.eval("` A` + ` `")\n expect = df[" A"] + df[" "]\n tm.assert_series_equal(res, expect)\n\n def test_lots_of_operators_string(self, df):\n res = df.query("` &^ :!€$?(} > <++*'' ` > 4")\n expect = df[df[" &^ :!€$?(} > <++*'' "] > 4]\n tm.assert_frame_equal(res, expect)\n\n def test_missing_attribute(self, df):\n message = "module 'pandas' has no attribute 'thing'"\n with pytest.raises(AttributeError, match=message):\n df.eval("@pd.thing")\n\n def test_failing_quote(self, df):\n msg = r"(Could not convert ).*( to a valid Python identifier.)"\n with pytest.raises(SyntaxError, match=msg):\n df.query("`it's` > `that's`")\n\n def test_failing_character_outside_range(self, df):\n msg = r"(Could not convert ).*( to a valid 
Python identifier.)"\n with pytest.raises(SyntaxError, match=msg):\n df.query("`☺` > 4")\n\n def test_failing_hashtag(self, df):\n msg = "Failed to parse backticks"\n with pytest.raises(SyntaxError, match=msg):\n df.query("`foo#bar` > 4")\n\n def test_call_non_named_expression(self, df):\n """\n Only attributes and variables ('named functions') can be called.\n .__call__() is not an allowed attribute because that would allow\n calling anything.\n https://github.com/pandas-dev/pandas/pull/32460\n """\n\n def func(*_):\n return 1\n\n funcs = [func] # noqa: F841\n\n df.eval("@func()")\n\n with pytest.raises(TypeError, match="Only named functions are supported"):\n df.eval("@funcs[0]()")\n\n with pytest.raises(TypeError, match="Only named functions are supported"):\n df.eval("@funcs[0].__call__()")\n\n def test_ea_dtypes(self, any_numeric_ea_and_arrow_dtype):\n # GH#29618\n df = DataFrame(\n [[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype\n )\n warning = RuntimeWarning if NUMEXPR_INSTALLED else None\n with tm.assert_produces_warning(warning):\n result = df.eval("c = b - a")\n expected = DataFrame(\n [[1, 2, 1], [3, 4, 1]],\n columns=["a", "b", "c"],\n dtype=any_numeric_ea_and_arrow_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n def test_ea_dtypes_and_scalar(self):\n # GH#29618\n df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"], dtype="Float64")\n warning = RuntimeWarning if NUMEXPR_INSTALLED else None\n with tm.assert_produces_warning(warning):\n result = df.eval("c = b - 1")\n expected = DataFrame(\n [[1, 2, 1], [3, 4, 3]], columns=["a", "b", "c"], dtype="Float64"\n )\n tm.assert_frame_equal(result, expected)\n\n def test_ea_dtypes_and_scalar_operation(self, any_numeric_ea_and_arrow_dtype):\n # GH#29618\n df = DataFrame(\n [[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype\n )\n result = df.eval("c = 2 - 1")\n expected = DataFrame(\n {\n "a": Series([1, 3], dtype=any_numeric_ea_and_arrow_dtype),\n "b": 
Series([2, 4], dtype=any_numeric_ea_and_arrow_dtype),\n "c": Series([1, 1], dtype=result["c"].dtype),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])\n def test_query_ea_dtypes(self, dtype):\n if dtype == "int64[pyarrow]":\n pytest.importorskip("pyarrow")\n # GH#50261\n df = DataFrame({"a": Series([1, 2], dtype=dtype)})\n ref = {2} # noqa: F841\n warning = RuntimeWarning if dtype == "Int64" and NUMEXPR_INSTALLED else None\n with tm.assert_produces_warning(warning):\n result = df.query("a in @ref")\n expected = DataFrame({"a": Series([2], dtype=dtype, index=[1])})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("engine", ["python", "numexpr"])\n @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])\n def test_query_ea_equality_comparison(self, dtype, engine):\n # GH#50261\n warning = RuntimeWarning if engine == "numexpr" else None\n if engine == "numexpr" and not NUMEXPR_INSTALLED:\n pytest.skip("numexpr not installed")\n if dtype == "int64[pyarrow]":\n pytest.importorskip("pyarrow")\n df = DataFrame(\n {"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)}\n )\n with tm.assert_produces_warning(warning):\n result = df.query("A == B", engine=engine)\n expected = DataFrame(\n {\n "A": Series([1, 2], dtype="Int64", index=[0, 2]),\n "B": Series([1, 2], dtype=dtype, index=[0, 2]),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_all_nat_in_object(self):\n # GH#57068\n now = pd.Timestamp.now("UTC") # noqa: F841\n df = DataFrame({"a": pd.to_datetime([None, None], utc=True)}, dtype=object)\n result = df.query("a > @now")\n expected = DataFrame({"a": []}, dtype=object)\n tm.assert_frame_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_query_eval.py
test_query_eval.py
Python
55,373
0.75
0.104312
0.039571
awesome-app
816
2024-04-25T19:05:58.393762
MIT
true
4db1df6485564dc9608ccc737f82310e
from datetime import timedelta\nfrom decimal import Decimal\nimport re\n\nfrom dateutil.tz import tzlocal\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n IS64,\n is_platform_windows,\n)\nfrom pandas.compat.numpy import np_version_gt2\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalDtype,\n DataFrame,\n DatetimeIndex,\n Index,\n PeriodIndex,\n RangeIndex,\n Series,\n Timestamp,\n date_range,\n isna,\n notna,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.core import (\n algorithms,\n nanops,\n)\n\nis_windows_np2_or_is32 = (is_platform_windows() and not np_version_gt2) or not IS64\nis_windows_or_is32 = is_platform_windows() or not IS64\n\n\ndef make_skipna_wrapper(alternative, skipna_alternative=None):\n """\n Create a function for calling on an array.\n\n Parameters\n ----------\n alternative : function\n The function to be called on the array with no NaNs.\n Only used when 'skipna_alternative' is None.\n skipna_alternative : function\n The function to be called on the original array\n\n Returns\n -------\n function\n """\n if skipna_alternative:\n\n def skipna_wrapper(x):\n return skipna_alternative(x.values)\n\n else:\n\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n return skipna_wrapper\n\n\ndef assert_stat_op_calc(\n opname,\n alternative,\n frame,\n has_skipna=True,\n check_dtype=True,\n check_dates=False,\n rtol=1e-5,\n atol=1e-8,\n skipna_alternative=None,\n):\n """\n Check that operator opname works as advertised on frame\n\n Parameters\n ----------\n opname : str\n Name of the operator to test on frame\n alternative : function\n Function that opname is tested against; i.e. 
"frame.opname()" should\n equal "alternative(frame)".\n frame : DataFrame\n The object that the tests are executed on\n has_skipna : bool, default True\n Whether the method "opname" has the kwarg "skip_na"\n check_dtype : bool, default True\n Whether the dtypes of the result of "frame.opname()" and\n "alternative(frame)" should be checked.\n check_dates : bool, default false\n Whether opname should be tested on a Datetime Series\n rtol : float, default 1e-5\n Relative tolerance.\n atol : float, default 1e-8\n Absolute tolerance.\n skipna_alternative : function, default None\n NaN-safe version of alternative\n """\n f = getattr(frame, opname)\n\n if check_dates:\n df = DataFrame({"b": date_range("1/1/2001", periods=2)})\n with tm.assert_produces_warning(None):\n result = getattr(df, opname)()\n assert isinstance(result, Series)\n\n df["a"] = range(len(df))\n with tm.assert_produces_warning(None):\n result = getattr(df, opname)()\n assert isinstance(result, Series)\n assert len(result)\n\n if has_skipna:\n\n def wrapper(x):\n return alternative(x.values)\n\n skipna_wrapper = make_skipna_wrapper(alternative, skipna_alternative)\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n tm.assert_series_equal(\n result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol\n )\n tm.assert_series_equal(\n result1,\n frame.apply(wrapper, axis=1),\n rtol=rtol,\n atol=atol,\n )\n else:\n skipna_wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n tm.assert_series_equal(\n result0,\n frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n rtol=rtol,\n atol=atol,\n )\n\n if opname in ["sum", "prod"]:\n expected = frame.apply(skipna_wrapper, axis=1)\n tm.assert_series_equal(\n result1, expected, check_dtype=False, rtol=rtol, atol=atol\n )\n\n # check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n assert lcd_dtype == result0.dtype\n assert lcd_dtype == result1.dtype\n\n # bad axis\n with pytest.raises(ValueError, 
match="No axis named 2"):\n f(axis=2)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.nan\n r0 = getattr(all_na, opname)(axis=0)\n r1 = getattr(all_na, opname)(axis=1)\n if opname in ["sum", "prod"]:\n unit = 1 if opname == "prod" else 0 # result for empty sum/prod\n expected = Series(unit, index=r0.index, dtype=r0.dtype)\n tm.assert_series_equal(r0, expected)\n expected = Series(unit, index=r1.index, dtype=r1.dtype)\n tm.assert_series_equal(r1, expected)\n\n\n@pytest.fixture\ndef bool_frame_with_na():\n """\n Fixture for DataFrame of booleans with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D']; some entries are missing\n """\n df = DataFrame(\n np.concatenate(\n [np.ones((15, 4), dtype=bool), np.zeros((15, 4), dtype=bool)], axis=0\n ),\n index=Index([f"foo_{i}" for i in range(30)], dtype=object),\n columns=Index(list("ABCD"), dtype=object),\n dtype=object,\n )\n # set some NAs\n df.iloc[5:10] = np.nan\n df.iloc[15:20, -2:] = np.nan\n return df\n\n\n@pytest.fixture\ndef float_frame_with_na():\n """\n Fixture for DataFrame of floats with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D']; some entries are missing\n """\n df = DataFrame(\n np.random.default_rng(2).standard_normal((30, 4)),\n index=Index([f"foo_{i}" for i in range(30)], dtype=object),\n columns=Index(list("ABCD"), dtype=object),\n )\n # set some NAs\n df.iloc[5:10] = np.nan\n df.iloc[15:20, -2:] = np.nan\n return df\n\n\nclass TestDataFrameAnalytics:\n # ---------------------------------------------------------------------\n # Reductions\n @pytest.mark.parametrize("axis", [0, 1])\n @pytest.mark.parametrize(\n "opname",\n [\n "count",\n "sum",\n "mean",\n "product",\n "median",\n "min",\n "max",\n "nunique",\n "var",\n "std",\n "sem",\n pytest.param("skew", marks=td.skip_if_no("scipy")),\n pytest.param("kurt", marks=td.skip_if_no("scipy")),\n ],\n )\n def test_stat_op_api_float_string_frame(self, float_string_frame, axis, opname):\n if (opname in ("sum", "min", "max") 
and axis == 0) or opname in (\n "count",\n "nunique",\n ):\n getattr(float_string_frame, opname)(axis=axis)\n else:\n if opname in ["var", "std", "sem", "skew", "kurt"]:\n msg = "could not convert string to float: 'bar'"\n elif opname == "product":\n if axis == 1:\n msg = "can't multiply sequence by non-int of type 'float'"\n else:\n msg = "can't multiply sequence by non-int of type 'str'"\n elif opname == "sum":\n msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'"\n elif opname == "mean":\n if axis == 0:\n # different message on different builds\n msg = "|".join(\n [\n r"Could not convert \['.*'\] to numeric",\n "Could not convert string '(bar){30}' to numeric",\n ]\n )\n else:\n msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'"\n elif opname in ["min", "max"]:\n msg = "'[><]=' not supported between instances of 'float' and 'str'"\n elif opname == "median":\n msg = re.compile(\n r"Cannot convert \[.*\] to numeric|does not support|Cannot perform",\n flags=re.S,\n )\n if not isinstance(msg, re.Pattern):\n msg = msg + "|does not support|Cannot perform reduction"\n with pytest.raises(TypeError, match=msg):\n getattr(float_string_frame, opname)(axis=axis)\n if opname != "nunique":\n getattr(float_string_frame, opname)(axis=axis, numeric_only=True)\n\n @pytest.mark.parametrize("axis", [0, 1])\n @pytest.mark.parametrize(\n "opname",\n [\n "count",\n "sum",\n "mean",\n "product",\n "median",\n "min",\n "max",\n "var",\n "std",\n "sem",\n pytest.param("skew", marks=td.skip_if_no("scipy")),\n pytest.param("kurt", marks=td.skip_if_no("scipy")),\n ],\n )\n def test_stat_op_api_float_frame(self, float_frame, axis, opname):\n getattr(float_frame, opname)(axis=axis, numeric_only=False)\n\n def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):\n def count(s):\n return notna(s).sum()\n\n def nunique(s):\n return len(algorithms.unique1d(s.dropna()))\n\n def var(x):\n return np.var(x, ddof=1)\n\n def std(x):\n return np.std(x, ddof=1)\n\n 
def sem(x):\n return np.std(x, ddof=1) / np.sqrt(len(x))\n\n assert_stat_op_calc(\n "nunique",\n nunique,\n float_frame_with_na,\n has_skipna=False,\n check_dtype=False,\n check_dates=True,\n )\n\n # GH#32571: rol needed for flaky CI builds\n # mixed types (with upcasting happening)\n assert_stat_op_calc(\n "sum",\n np.sum,\n mixed_float_frame.astype("float32"),\n check_dtype=False,\n rtol=1e-3,\n )\n\n assert_stat_op_calc(\n "sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum\n )\n assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True)\n assert_stat_op_calc(\n "product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod\n )\n\n assert_stat_op_calc("var", var, float_frame_with_na)\n assert_stat_op_calc("std", std, float_frame_with_na)\n assert_stat_op_calc("sem", sem, float_frame_with_na)\n\n assert_stat_op_calc(\n "count",\n count,\n float_frame_with_na,\n has_skipna=False,\n check_dtype=False,\n check_dates=True,\n )\n\n def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):\n sp_stats = pytest.importorskip("scipy.stats")\n\n def skewness(x):\n if len(x) < 3:\n return np.nan\n return sp_stats.skew(x, bias=False)\n\n def kurt(x):\n if len(x) < 4:\n return np.nan\n return sp_stats.kurtosis(x, bias=False)\n\n assert_stat_op_calc("skew", skewness, float_frame_with_na)\n assert_stat_op_calc("kurt", kurt, float_frame_with_na)\n\n def test_median(self, float_frame_with_na, int_frame):\n def wrapper(x):\n if isna(x).any():\n return np.nan\n return np.median(x)\n\n assert_stat_op_calc("median", wrapper, float_frame_with_na, check_dates=True)\n assert_stat_op_calc(\n "median", wrapper, int_frame, check_dtype=False, check_dates=True\n )\n\n @pytest.mark.parametrize(\n "method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"]\n )\n @pytest.mark.parametrize(\n "df",\n [\n DataFrame(\n {\n "a": [\n -0.00049987540199591344,\n -0.0016467257772919831,\n 0.00067695870775883013,\n ],\n "b": [-0, -0, 0.0],\n "c": [\n 
0.00031111847529610595,\n 0.0014902627951905339,\n -0.00094099200035979691,\n ],\n },\n index=["foo", "bar", "baz"],\n dtype="O",\n ),\n DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object),\n ],\n )\n @pytest.mark.filterwarnings("ignore:Mismatched null-like values:FutureWarning")\n def test_stat_operators_attempt_obj_array(self, method, df, axis):\n # GH#676\n assert df.values.dtype == np.object_\n result = getattr(df, method)(axis=axis)\n expected = getattr(df.astype("f8"), method)(axis=axis).astype(object)\n if axis in [1, "columns"] and method in ["min", "max"]:\n expected[expected.isna()] = None\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"])\n def test_mixed_ops(self, op):\n # GH#16116\n df = DataFrame(\n {\n "int": [1, 2, 3, 4],\n "float": [1.0, 2.0, 3.0, 4.0],\n "str": ["a", "b", "c", "d"],\n }\n )\n msg = "|".join(\n [\n "Could not convert",\n "could not convert",\n "can't multiply sequence by non-int",\n "does not support",\n "Cannot perform",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n getattr(df, op)()\n\n with pd.option_context("use_bottleneck", False):\n with pytest.raises(TypeError, match=msg):\n getattr(df, op)()\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame(\n {\n "bool_data": [True, True, False, False, False],\n "int_data": [10, 20, 30, 40, 50],\n "string_data": ["a", "b", "c", "d", "e"],\n }\n )\n df.reindex(columns=["bool_data", "int_data", "string_data"])\n test = df.sum(axis=0)\n tm.assert_numpy_array_equal(\n test.values, np.array([2, 150, "abcde"], dtype=object)\n )\n alt = df.T.sum(axis=1)\n tm.assert_series_equal(test, alt)\n\n def test_nunique(self):\n df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]})\n tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2}))\n tm.assert_series_equal(\n df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3})\n )\n 
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))\n tm.assert_series_equal(\n df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})\n )\n\n @pytest.mark.parametrize("tz", [None, "UTC"])\n def test_mean_mixed_datetime_numeric(self, tz):\n # https://github.com/pandas-dev/pandas/issues/24752\n df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2})\n result = df.mean()\n expected = Series([1.0, Timestamp("2000", tz=tz)], index=["A", "B"])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("tz", [None, "UTC"])\n def test_mean_includes_datetimes(self, tz):\n # https://github.com/pandas-dev/pandas/issues/24752\n # Behavior in 0.24.0rc1 was buggy.\n # As of 2.0 with numeric_only=None we do *not* drop datetime columns\n df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2})\n result = df.mean()\n\n expected = Series([Timestamp("2000", tz=tz)], index=["A"])\n tm.assert_series_equal(result, expected)\n\n def test_mean_mixed_string_decimal(self):\n # GH 11670\n # possible bug when calculating mean of DataFrame?\n\n d = [\n {"A": 2, "B": None, "C": Decimal("628.00")},\n {"A": 1, "B": None, "C": Decimal("383.00")},\n {"A": 3, "B": None, "C": Decimal("651.00")},\n {"A": 2, "B": None, "C": Decimal("575.00")},\n {"A": 4, "B": None, "C": Decimal("1114.00")},\n {"A": 1, "B": "TEST", "C": Decimal("241.00")},\n {"A": 2, "B": None, "C": Decimal("572.00")},\n {"A": 4, "B": None, "C": Decimal("609.00")},\n {"A": 3, "B": None, "C": Decimal("820.00")},\n {"A": 5, "B": None, "C": Decimal("1223.00")},\n ]\n\n df = DataFrame(d)\n\n with pytest.raises(\n TypeError, match="unsupported operand type|does not support|Cannot perform"\n ):\n df.mean()\n result = df[["A", "C"]].mean()\n expected = Series([2.7, 681.6], index=["A", "C"], dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_var_std(self, datetime_frame):\n result = datetime_frame.std(ddof=4)\n expected = datetime_frame.apply(lambda x: x.std(ddof=4))\n 
tm.assert_almost_equal(result, expected)\n\n result = datetime_frame.var(ddof=4)\n expected = datetime_frame.apply(lambda x: x.var(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context("use_bottleneck", False):\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n @pytest.mark.parametrize("meth", ["sem", "var", "std"])\n def test_numeric_only_flag(self, meth):\n # GH 9201\n df1 = DataFrame(\n np.random.default_rng(2).standard_normal((5, 3)),\n columns=["foo", "bar", "baz"],\n )\n # Cast to object to avoid implicit cast when setting entry to "100" below\n df1 = df1.astype({"foo": object})\n # set one entry to a number in str format\n df1.loc[0, "foo"] = "100"\n\n df2 = DataFrame(\n np.random.default_rng(2).standard_normal((5, 3)),\n columns=["foo", "bar", "baz"],\n )\n # Cast to object to avoid implicit cast when setting entry to "a" below\n df2 = df2.astype({"foo": object})\n # set one entry to a non-number str\n df2.loc[0, "foo"] = "a"\n\n result = getattr(df1, meth)(axis=1, numeric_only=True)\n expected = getattr(df1[["bar", "baz"]], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n result = getattr(df2, meth)(axis=1, numeric_only=True)\n expected = getattr(df2[["bar", "baz"]], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n # df1 has all numbers, df2 has a letter inside\n msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"\n with pytest.raises(TypeError, match=msg):\n getattr(df1, meth)(axis=1, numeric_only=False)\n msg = "could not convert string to float: 'a'"\n with pytest.raises(TypeError, match=msg):\n getattr(df2, meth)(axis=1, numeric_only=False)\n\n def test_sem(self, datetime_frame):\n result = datetime_frame.sem(ddof=4)\n expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x)))\n tm.assert_almost_equal(result, 
expected)\n\n arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context("use_bottleneck", False):\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n @pytest.mark.parametrize(\n "dropna, expected",\n [\n (\n True,\n {\n "A": [12],\n "B": [10.0],\n "C": [1.0],\n "D": ["a"],\n "E": Categorical(["a"], categories=["a"]),\n "F": DatetimeIndex(["2000-01-02"], dtype="M8[ns]"),\n "G": to_timedelta(["1 days"]),\n },\n ),\n (\n False,\n {\n "A": [12],\n "B": [10.0],\n "C": [np.nan],\n "D": Series([np.nan], dtype="str"),\n "E": Categorical([np.nan], categories=["a"]),\n "F": DatetimeIndex([pd.NaT], dtype="M8[ns]"),\n "G": to_timedelta([pd.NaT]),\n },\n ),\n (\n True,\n {\n "H": [8, 9, np.nan, np.nan],\n "I": [8, 9, np.nan, np.nan],\n "J": [1, np.nan, np.nan, np.nan],\n "K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),\n "L": DatetimeIndex(\n ["2000-01-02", "NaT", "NaT", "NaT"], dtype="M8[ns]"\n ),\n "M": to_timedelta(["1 days", "nan", "nan", "nan"]),\n "N": [0, 1, 2, 3],\n },\n ),\n (\n False,\n {\n "H": [8, 9, np.nan, np.nan],\n "I": [8, 9, np.nan, np.nan],\n "J": [1, np.nan, np.nan, np.nan],\n "K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),\n "L": DatetimeIndex(\n ["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"\n ),\n "M": to_timedelta(["nan", "1 days", "nan", "nan"]),\n "N": [0, 1, 2, 3],\n },\n ),\n ],\n )\n def test_mode_dropna(self, dropna, expected):\n df = DataFrame(\n {\n "A": [12, 12, 19, 11],\n "B": [10, 10, np.nan, 3],\n "C": [1, np.nan, np.nan, np.nan],\n "D": Series([np.nan, np.nan, "a", np.nan], dtype="str"),\n "E": Categorical([np.nan, np.nan, "a", np.nan]),\n "F": DatetimeIndex(["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"),\n "G": to_timedelta(["1 days", "nan", "nan", "nan"]),\n "H": [8, 8, 9, 9],\n "I": [9, 9, 8, 8],\n "J": [1, 1, np.nan, np.nan],\n "K": Categorical(["a", np.nan, "a", 
np.nan]),
                "L": DatetimeIndex(
                    ["2000-01-02", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
                ),
                "M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
                "N": np.arange(4, dtype="int64"),
            }
        )

        # Only the columns named in the parametrized `expected` dict take part;
        # sorting the keys keeps the column order deterministic per case.
        result = df[sorted(expected.keys())].mode(dropna=dropna)
        expected = DataFrame(expected)
        tm.assert_frame_equal(result, expected)

    def test_mode_sort_with_na(self, using_infer_string):
        # With dropna=False both "a" and NaN are modes; NaN sorts after "a".
        df = DataFrame({"A": [np.nan, np.nan, "a", "a"]})
        expected = DataFrame({"A": ["a", np.nan]})
        result = df.mode(dropna=False)
        tm.assert_frame_equal(result, expected)

    def test_mode_empty_df(self):
        # mode() of an empty frame keeps the columns and returns an empty
        # int64 index rather than raising.
        df = DataFrame([], columns=["a", "b"])
        result = df.mode()
        expected = DataFrame([], columns=["a", "b"], index=Index([], dtype=np.int64))
        tm.assert_frame_equal(result, expected)

    def test_operators_timedelta64(self):
        df = DataFrame(
            {
                "A": date_range("2012-1-1", periods=3, freq="D"),
                "B": date_range("2012-1-2", periods=3, freq="D"),
                "C": Timestamp("20120101") - timedelta(minutes=5, seconds=5),
            }
        )

        # Two timedelta64 columns: A - C grows by one day per row (starting at
        # 5min5s), while A - B is a constant -1 day.
        diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]})

        # min
        result = diffs.min()
        assert result.iloc[0] == diffs.loc[0, "A"]
        assert result.iloc[1] == diffs.loc[0, "B"]

        result = diffs.min(axis=1)
        assert (result == diffs.loc[0, "B"]).all()

        # max
        result = diffs.max()
        assert result.iloc[0] == diffs.loc[2, "A"]
        assert result.iloc[1] == diffs.loc[2, "B"]

        result = diffs.max(axis=1)
        assert (result == diffs["A"]).all()

        # abs: both the method and the builtin must negate the negative deltas
        result = diffs.abs()
        result2 = abs(diffs)
        expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]})
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)

        # mixed frame
        mixed = diffs.copy()
        mixed["C"] = "foo"
        mixed["D"] = 1
        mixed["E"] = 1.0
        mixed["F"] = Timestamp("20130101")

        # results in an object array
        result = mixed.min()
        expected = Series(
            [
                pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),\n "foo",\n 1,\n 1.0,\n Timestamp("20130101"),\n ],\n index=mixed.columns,\n )\n tm.assert_series_equal(result, expected)\n\n # excludes non-numeric\n result = mixed.min(axis=1, numeric_only=True)\n expected = Series([1, 1, 1.0], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # works when only those columns are selected\n result = mixed[["A", "B"]].min(1)\n expected = Series([timedelta(days=-1)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = mixed[["A", "B"]].min()\n expected = Series(\n [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"]\n )\n tm.assert_series_equal(result, expected)\n\n # GH 3106\n df = DataFrame(\n {\n "time": date_range("20130102", periods=5),\n "time2": date_range("20130105", periods=5),\n }\n )\n df["off1"] = df["time2"] - df["time"]\n assert df["off1"].dtype == "timedelta64[ns]"\n\n df["off2"] = df["time"] - df["time2"]\n df._consolidate_inplace()\n assert df["off1"].dtype == "timedelta64[ns]"\n assert df["off2"].dtype == "timedelta64[ns]"\n\n def test_std_timedelta64_skipna_false(self):\n # GH#37392\n tdi = pd.timedelta_range("1 Day", periods=10)\n df = DataFrame({"A": tdi, "B": tdi}, copy=True)\n df.iloc[-2, -1] = pd.NaT\n\n result = df.std(skipna=False)\n expected = Series(\n [df["A"].std(), pd.NaT], index=["A", "B"], dtype="timedelta64[ns]"\n )\n tm.assert_series_equal(result, expected)\n\n result = df.std(axis=1, skipna=False)\n expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "values", [["2022-01-01", "2022-01-02", pd.NaT, "2022-01-03"], 4 * [pd.NaT]]\n )\n def test_std_datetime64_with_nat(\n self, values, skipna, using_array_manager, request, unit\n ):\n # GH#51335\n if using_array_manager and (\n not skipna or all(value is pd.NaT for value in values)\n ):\n mark = pytest.mark.xfail(\n reason="GH#51446: Incorrect type inference on NaT in reduction result"\n )\n 
request.applymarker(mark)
        dti = to_datetime(values).as_unit(unit)
        df = DataFrame({"a": dti})
        result = df.std(skipna=skipna)
        # All-NaT input (or any NaT when skipna=False) propagates NaT in the
        # requested resolution; otherwise the three remaining consecutive days
        # have a std of exactly one day.
        if not skipna or all(value is pd.NaT for value in values):
            expected = Series({"a": pd.NaT}, dtype=f"timedelta64[{unit}]")
        else:
            # 86400000000000ns == 1 day
            expected = Series({"a": 86400000000000}, dtype=f"timedelta64[{unit}]")
        tm.assert_series_equal(result, expected)

    def test_sum_corner(self):
        # Reductions on a completely empty frame still return (length-0) Series.
        empty_frame = DataFrame()

        axis0 = empty_frame.sum(0)
        axis1 = empty_frame.sum(1)
        assert isinstance(axis0, Series)
        assert isinstance(axis1, Series)
        assert len(axis0) == 0
        assert len(axis1) == 0

    @pytest.mark.parametrize(
        "index",
        [
            RangeIndex(0),
            DatetimeIndex([]),
            Index([], dtype=np.int64),
            Index([], dtype=np.float64),
            DatetimeIndex([], freq="ME"),
            PeriodIndex([], freq="D"),
        ],
    )
    def test_axis_1_empty(self, all_reductions, index):
        # Row-wise reductions over an empty index: the result dtype depends on
        # the reduction (bool for any/all, int64 for count, object otherwise).
        df = DataFrame(columns=["a"], index=index)
        result = getattr(df, all_reductions)(axis=1)
        if all_reductions in ("any", "all"):
            expected_dtype = "bool"
        elif all_reductions == "count":
            expected_dtype = "int64"
        else:
            expected_dtype = "object"
        expected = Series([], index=index, dtype=expected_dtype)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)])
    @pytest.mark.parametrize("numeric_only", [None, True, False])
    def test_sum_prod_nanops(self, method, unit, numeric_only):
        # "unit" is the reduction's identity element (0 for sum, 1 for prod);
        # it is returned for all-NaN columns unless min_count rules it out.
        idx = ["a", "b", "c"]
        df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]})
        # The default
        result = getattr(df, method)(numeric_only=numeric_only)
        expected = Series([unit, unit, unit], index=idx, dtype="float64")
        tm.assert_series_equal(result, expected)

        # min_count=1
        result = getattr(df, method)(numeric_only=numeric_only, min_count=1)
        expected = Series([unit, unit, np.nan], index=idx)
        tm.assert_series_equal(result, expected)

        # min_count=0
        result = getattr(df, 
method)(numeric_only=numeric_only, min_count=0)\n expected = Series([unit, unit, unit], index=idx, dtype="float64")\n tm.assert_series_equal(result, expected)\n\n result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1)\n expected = Series([unit, np.nan, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count > 1\n df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})\n result = getattr(df, method)(numeric_only=numeric_only, min_count=5)\n expected = Series(result, index=["A", "B"])\n tm.assert_series_equal(result, expected)\n\n result = getattr(df, method)(numeric_only=numeric_only, min_count=6)\n expected = Series(result, index=["A", "B"])\n tm.assert_series_equal(result, expected)\n\n def test_sum_nanops_timedelta(self):\n # prod isn't defined on timedeltas\n idx = ["a", "b", "c"]\n df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})\n\n df2 = df.apply(to_timedelta)\n\n # 0 by default\n result = df2.sum()\n expected = Series([0, 0, 0], dtype="m8[ns]", index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df2.sum(min_count=0)\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df2.sum(min_count=1)\n expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx)\n tm.assert_series_equal(result, expected)\n\n def test_sum_nanops_min_count(self):\n # https://github.com/pandas-dev/pandas/issues/39738\n df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})\n result = df.sum(min_count=10)\n expected = Series([np.nan, np.nan], index=["x", "y"])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("float_type", ["float16", "float32", "float64"])\n @pytest.mark.parametrize(\n "kwargs, expected_result",\n [\n ({"axis": 1, "min_count": 2}, [3.2, 5.3, np.nan]),\n ({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]),\n ({"axis": 1, "skipna": False}, [3.2, 5.3, np.nan]),\n ],\n )\n def test_sum_nanops_dtype_min_count(self, float_type, 
kwargs, expected_result):
        # GH#46947
        df = DataFrame({"a": [1.0, 2.3, 4.4], "b": [2.2, 3, np.nan]}, dtype=float_type)
        result = df.sum(**kwargs)
        # The reduction must preserve the input float dtype (incl. float16/32).
        expected = Series(expected_result).astype(float_type)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("float_type", ["float16", "float32", "float64"])
    @pytest.mark.parametrize(
        "kwargs, expected_result",
        [
            ({"axis": 1, "min_count": 2}, [2.0, 4.0, np.nan]),
            ({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]),
            ({"axis": 1, "skipna": False}, [2.0, 4.0, np.nan]),
        ],
    )
    def test_prod_nanops_dtype_min_count(self, float_type, kwargs, expected_result):
        # GH#46947: row-wise prod honors min_count/skipna and keeps the dtype.
        df = DataFrame(
            {"a": [1.0, 2.0, 4.4], "b": [2.0, 2.0, np.nan]}, dtype=float_type
        )
        result = df.prod(**kwargs)
        expected = Series(expected_result).astype(float_type)
        tm.assert_series_equal(result, expected)

    def test_sum_object(self, float_frame):
        # Smoke test: a frame scaled by timedelta(1) can be summed without error.
        values = float_frame.values.astype(int)
        frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns)
        deltas = frame * timedelta(1)
        deltas.sum()

    def test_sum_bool(self, float_frame):
        # ensure this works, bug report: np.isnan yields a bool frame that
        # must be summable along both axes
        bools = np.isnan(float_frame)
        bools.sum(1)
        bools.sum(0)

    def test_sum_mixed_datetime(self):
        # GH#30886: a datetime column makes the frame-wide sum unsupported.
        df = DataFrame({"A": date_range("2000", periods=4), "B": [1, 2, 3, 4]}).reindex(
            [2, 3, 4]
        )
        with pytest.raises(TypeError, match="does not support reduction 'sum'"):
            df.sum()

    def test_mean_corner(self, float_frame, float_string_frame):
        # unit test when have object data
        msg = "Could not convert|does not support|Cannot perform"
        with pytest.raises(TypeError, match=msg):
            float_string_frame.mean(axis=0)

        # xs sum mixed type, just want to know it works...
        with pytest.raises(TypeError, match="unsupported operand type"):
            float_string_frame.mean(axis=1)

        # take mean of boolean column
        float_frame["bool"] = float_frame["A"] > 0
        means = float_frame.mean(0)
        assert means["bool"] == 
float_frame["bool"].values.mean()\n\n def test_mean_datetimelike(self):\n # GH#24757 check that datetimelike are excluded by default, handled\n # correctly with numeric_only=True\n # As of 2.0, datetimelike are *not* excluded with numeric_only=None\n\n df = DataFrame(\n {\n "A": np.arange(3),\n "B": date_range("2016-01-01", periods=3),\n "C": pd.timedelta_range("1D", periods=3),\n "D": pd.period_range("2016", periods=3, freq="Y"),\n }\n )\n result = df.mean(numeric_only=True)\n expected = Series({"A": 1.0})\n tm.assert_series_equal(result, expected)\n\n with pytest.raises(TypeError, match="mean is not implemented for PeriodArray"):\n df.mean()\n\n def test_mean_datetimelike_numeric_only_false(self):\n df = DataFrame(\n {\n "A": np.arange(3),\n "B": date_range("2016-01-01", periods=3),\n "C": pd.timedelta_range("1D", periods=3),\n }\n )\n\n # datetime(tz) and timedelta work\n result = df.mean(numeric_only=False)\n expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})\n tm.assert_series_equal(result, expected)\n\n # mean of period is not allowed\n df["D"] = pd.period_range("2016", periods=3, freq="Y")\n\n with pytest.raises(TypeError, match="mean is not implemented for Period"):\n df.mean(numeric_only=False)\n\n def test_mean_extensionarray_numeric_only_true(self):\n # https://github.com/pandas-dev/pandas/issues/33256\n arr = np.random.default_rng(2).integers(1000, size=(10, 5))\n df = DataFrame(arr, dtype="Int64")\n result = df.mean(numeric_only=True)\n expected = DataFrame(arr).mean().astype("Float64")\n tm.assert_series_equal(result, expected)\n\n def test_stats_mixed_type(self, float_string_frame):\n with pytest.raises(TypeError, match="could not convert"):\n float_string_frame.std(1)\n with pytest.raises(TypeError, match="could not convert"):\n float_string_frame.var(1)\n with pytest.raises(TypeError, match="unsupported operand type"):\n float_string_frame.mean(1)\n with pytest.raises(TypeError, match="could not convert"):\n 
float_string_frame.skew(1)\n\n def test_sum_bools(self):\n df = DataFrame(index=range(1), columns=range(10))\n bools = isna(df)\n assert bools.sum(axis=1)[0] == 10\n\n # ----------------------------------------------------------------------\n # Index of max / min\n\n @pytest.mark.parametrize("skipna", [True, False])\n @pytest.mark.parametrize("axis", [0, 1])\n def test_idxmin(self, float_frame, int_frame, skipna, axis):\n frame = float_frame\n frame.iloc[5:10] = np.nan\n frame.iloc[15:20, -2:] = np.nan\n for df in [frame, int_frame]:\n warn = None\n if skipna is False or axis == 1:\n warn = None if df is int_frame else FutureWarning\n msg = "The behavior of DataFrame.idxmin with all-NA values"\n with tm.assert_produces_warning(warn, match=msg):\n result = df.idxmin(axis=axis, skipna=skipna)\n\n msg2 = "The behavior of Series.idxmin"\n with tm.assert_produces_warning(warn, match=msg2):\n expected = df.apply(Series.idxmin, axis=axis, skipna=skipna)\n expected = expected.astype(df.index.dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("axis", [0, 1])\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_idxmin_empty(self, index, skipna, axis):\n # GH53265\n if axis == 0:\n frame = DataFrame(index=index)\n else:\n frame = DataFrame(columns=index)\n\n result = frame.idxmin(axis=axis, skipna=skipna)\n expected = Series(dtype=index.dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("numeric_only", [True, False])\n def test_idxmin_numeric_only(self, numeric_only):\n df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")})\n result = df.idxmin(numeric_only=numeric_only)\n if numeric_only:\n expected = Series([2, 1], index=["a", "b"])\n else:\n expected = Series([2, 1, 0], index=["a", "b", "c"])\n tm.assert_series_equal(result, expected)\n\n def test_idxmin_axis_2(self, float_frame):\n frame = float_frame\n msg = "No axis named 2 for object type DataFrame"\n with 
pytest.raises(ValueError, match=msg):
            frame.idxmin(axis=2)

    @pytest.mark.parametrize("axis", [0, 1])
    def test_idxmax(self, float_frame, int_frame, skipna, axis):
        # DataFrame.idxmax must match applying Series.idxmax column/row-wise,
        # including the FutureWarning for all-NA slices (float frame only).
        frame = float_frame
        frame.iloc[5:10] = np.nan
        frame.iloc[15:20, -2:] = np.nan
        for df in [frame, int_frame]:
            warn = None
            if skipna is False or axis == 1:
                # int_frame holds no NaNs, so no deprecation warning there
                warn = None if df is int_frame else FutureWarning
            msg = "The behavior of DataFrame.idxmax with all-NA values"
            with tm.assert_produces_warning(warn, match=msg):
                result = df.idxmax(axis=axis, skipna=skipna)

            msg2 = "The behavior of Series.idxmax"
            with tm.assert_produces_warning(warn, match=msg2):
                expected = df.apply(Series.idxmax, axis=axis, skipna=skipna)
            expected = expected.astype(df.index.dtype)
            tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("axis", [0, 1])
    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
    def test_idxmax_empty(self, index, skipna, axis):
        # GH53265: idxmax on an empty axis returns an empty Series with the
        # index's dtype instead of raising.
        if axis == 0:
            frame = DataFrame(index=index)
        else:
            frame = DataFrame(columns=index)

        result = frame.idxmax(axis=axis, skipna=skipna)
        expected = Series(dtype=index.dtype)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("numeric_only", [True, False])
    def test_idxmax_numeric_only(self, numeric_only):
        # numeric_only=True drops the string column "c" from the result.
        df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")})
        result = df.idxmax(numeric_only=numeric_only)
        if numeric_only:
            expected = Series([1, 0], index=["a", "b"])
        else:
            expected = Series([1, 0, 1], index=["a", "b", "c"])
        tm.assert_series_equal(result, expected)

    def test_idxmax_arrow_types(self):
        # GH#55368: idxmax/idxmin work on pyarrow-backed dtypes
        pytest.importorskip("pyarrow")

        df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]")
        result = df.idxmax()
        expected = Series([1, 0], index=["a", "b"])
        tm.assert_series_equal(result, expected)

        result = df.idxmin()
        expected = Series([2, 1], index=["a", "b"])
        tm.assert_series_equal(result, 
expected)\n\n df = DataFrame({"a": ["b", "c", "a"]}, dtype="string[pyarrow]")\n result = df.idxmax(numeric_only=False)\n expected = Series([1], index=["a"])\n tm.assert_series_equal(result, expected)\n\n result = df.idxmin(numeric_only=False)\n expected = Series([2], index=["a"])\n tm.assert_series_equal(result, expected)\n\n def test_idxmax_axis_2(self, float_frame):\n frame = float_frame\n msg = "No axis named 2 for object type DataFrame"\n with pytest.raises(ValueError, match=msg):\n frame.idxmax(axis=2)\n\n def test_idxmax_mixed_dtype(self):\n # don't cast to object, which would raise in nanops\n dti = date_range("2016-01-01", periods=3)\n\n # Copying dti is needed for ArrayManager otherwise when we set\n # df.loc[0, 3] = pd.NaT below it edits dti\n df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti.copy(deep=True)})\n\n result = df.idxmax()\n expected = Series([1, 0, 2], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n result = df.idxmin()\n expected = Series([0, 2, 0], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n # with NaTs\n df.loc[0, 3] = pd.NaT\n result = df.idxmax()\n expected = Series([1, 0, 2], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n result = df.idxmin()\n expected = Series([0, 2, 1], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n # with multi-column dt64 block\n df[4] = dti[::-1]\n df._consolidate_inplace()\n\n result = df.idxmax()\n expected = Series([1, 0, 2, 0], index=[1, 2, 3, 4])\n tm.assert_series_equal(result, expected)\n\n result = df.idxmin()\n expected = Series([0, 2, 1, 2], index=[1, 2, 3, 4])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "op, expected_value",\n [("idxmax", [0, 4]), ("idxmin", [0, 5])],\n )\n def test_idxmax_idxmin_convert_dtypes(self, op, expected_value):\n # GH 40346\n df = DataFrame(\n {\n "ID": [100, 100, 100, 200, 200, 200],\n "value": [0, 0, 0, 1, 2, 0],\n },\n dtype="Int64",\n )\n df = df.groupby("ID")\n\n 
result = getattr(df, op)()\n expected = DataFrame(\n {"value": expected_value},\n index=Index([100, 200], name="ID", dtype="Int64"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_idxmax_dt64_multicolumn_axis1(self):\n dti = date_range("2016-01-01", periods=3)\n df = DataFrame({3: dti, 4: dti[::-1]}, copy=True)\n df.iloc[0, 0] = pd.NaT\n\n df._consolidate_inplace()\n\n result = df.idxmax(axis=1)\n expected = Series([4, 3, 3])\n tm.assert_series_equal(result, expected)\n\n result = df.idxmin(axis=1)\n expected = Series([4, 3, 4])\n tm.assert_series_equal(result, expected)\n\n # ----------------------------------------------------------------------\n # Logical reductions\n\n @pytest.mark.parametrize("opname", ["any", "all"])\n @pytest.mark.parametrize("axis", [0, 1])\n @pytest.mark.parametrize("bool_only", [False, True])\n def test_any_all_mixed_float(self, opname, axis, bool_only, float_string_frame):\n # make sure op works on mixed-type frame\n mixed = float_string_frame\n mixed["_bool_"] = np.random.default_rng(2).standard_normal(len(mixed)) > 0.5\n\n getattr(mixed, opname)(axis=axis, bool_only=bool_only)\n\n @pytest.mark.parametrize("opname", ["any", "all"])\n @pytest.mark.parametrize("axis", [0, 1])\n def test_any_all_bool_with_na(self, opname, axis, bool_frame_with_na):\n getattr(bool_frame_with_na, opname)(axis=axis, bool_only=False)\n\n @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n @pytest.mark.parametrize("opname", ["any", "all"])\n def test_any_all_bool_frame(self, opname, bool_frame_with_na):\n # GH#12863: numpy gives back non-boolean data for object type\n # so fill NaNs to compare with pandas behavior\n frame = bool_frame_with_na.fillna(True)\n alternative = getattr(np, opname)\n f = getattr(frame, opname)\n\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, 
skipna=False)\n\n tm.assert_series_equal(result0, frame.apply(wrapper))\n tm.assert_series_equal(result1, frame.apply(wrapper, axis=1))\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n\n tm.assert_series_equal(result0, frame.apply(skipna_wrapper))\n tm.assert_series_equal(\n result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False\n )\n\n # bad axis\n with pytest.raises(ValueError, match="No axis named 2"):\n f(axis=2)\n\n # all NA case\n all_na = frame * np.nan\n r0 = getattr(all_na, opname)(axis=0)\n r1 = getattr(all_na, opname)(axis=1)\n if opname == "any":\n assert not r0.any()\n assert not r1.any()\n else:\n assert r0.all()\n assert r1.all()\n\n def test_any_all_extra(self):\n df = DataFrame(\n {\n "A": [True, False, False],\n "B": [True, True, False],\n "C": [True, True, True],\n },\n index=["a", "b", "c"],\n )\n result = df[["A", "B"]].any(axis=1)\n expected = Series([True, True, False], index=["a", "b", "c"])\n tm.assert_series_equal(result, expected)\n\n result = df[["A", "B"]].any(axis=1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n result = df.all(1)\n expected = Series([True, False, False], index=["a", "b", "c"])\n tm.assert_series_equal(result, expected)\n\n result = df.all(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n # Axis is None\n result = df.all(axis=None).item()\n assert result is False\n\n result = df.any(axis=None).item()\n assert result is True\n\n result = df[["C"]].all(axis=None).item()\n assert result is True\n\n @pytest.mark.parametrize("axis", [0, 1])\n @pytest.mark.parametrize("bool_agg_func", ["any", "all"])\n @pytest.mark.parametrize("skipna", [True, False])\n def test_any_all_object_dtype(self, axis, bool_agg_func, skipna):\n # GH#35450\n df = DataFrame(\n data=[\n [1, np.nan, np.nan, True],\n [np.nan, 2, np.nan, True],\n [np.nan, np.nan, np.nan, True],\n [np.nan, np.nan, "5", np.nan],\n ]\n )\n result = getattr(df, bool_agg_func)(axis=axis, skipna=skipna)\n expected = Series([True, True, 
True, True])\n tm.assert_series_equal(result, expected)\n\n # GH#50947 deprecates this but it is not emitting a warning in some builds.\n @pytest.mark.filterwarnings(\n "ignore:'any' with datetime64 dtypes is deprecated.*:FutureWarning"\n )\n def test_any_datetime(self):\n # GH 23070\n float_data = [1, np.nan, 3, np.nan]\n datetime_data = [\n Timestamp("1960-02-15"),\n Timestamp("1960-02-16"),\n pd.NaT,\n pd.NaT,\n ]\n df = DataFrame({"A": float_data, "B": datetime_data})\n\n result = df.any(axis=1)\n\n expected = Series([True, True, True, False])\n tm.assert_series_equal(result, expected)\n\n def test_any_all_bool_only(self):\n # GH 25101\n df = DataFrame(\n {"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]},\n columns=Index(["col1", "col2", "col3"], dtype=object),\n )\n\n result = df.all(bool_only=True)\n expected = Series(dtype=np.bool_, index=[])\n tm.assert_series_equal(result, expected)\n\n df = DataFrame(\n {\n "col1": [1, 2, 3],\n "col2": [4, 5, 6],\n "col3": [None, None, None],\n "col4": [False, False, True],\n }\n )\n\n result = df.all(bool_only=True)\n expected = Series({"col4": False})\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "func, data, expected",\n [\n (np.any, {}, False),\n (np.all, {}, True),\n (np.any, {"A": []}, False),\n (np.all, {"A": []}, True),\n (np.any, {"A": [False, False]}, False),\n (np.all, {"A": [False, False]}, False),\n (np.any, {"A": [True, False]}, True),\n (np.all, {"A": [True, False]}, False),\n (np.any, {"A": [True, True]}, True),\n (np.all, {"A": [True, True]}, True),\n (np.any, {"A": [False], "B": [False]}, False),\n (np.all, {"A": [False], "B": [False]}, False),\n (np.any, {"A": [False, False], "B": [False, True]}, True),\n (np.all, {"A": [False, False], "B": [False, True]}, False),\n # other types\n (np.all, {"A": Series([0.0, 1.0], dtype="float")}, False),\n (np.any, {"A": Series([0.0, 1.0], dtype="float")}, True),\n (np.all, {"A": Series([0, 1], dtype=int)}, False),\n 
(np.any, {"A": Series([0, 1], dtype=int)}, True),\n pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False),\n pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, False),\n pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True),\n pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, True),\n pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True),\n pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),\n pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True),\n pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),\n pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False),\n pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True),\n pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True),\n pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True),\n # np.all on Categorical raises, so the reduction drops the\n # column, so all is being done on an empty Series, so is True\n (np.all, {"A": Series([0, 1], dtype="category")}, True),\n (np.any, {"A": Series([0, 1], dtype="category")}, False),\n (np.all, {"A": Series([1, 2], dtype="category")}, True),\n (np.any, {"A": Series([1, 2], dtype="category")}, False),\n # Mix GH#21484\n pytest.param(\n np.all,\n {\n "A": Series([10, 20], dtype="M8[ns]"),\n "B": Series([10, 20], dtype="m8[ns]"),\n },\n True,\n ),\n ],\n )\n def test_any_all_np_func(self, func, data, expected):\n # GH 19976\n data = DataFrame(data)\n\n if any(isinstance(x, CategoricalDtype) for x in data.dtypes):\n with pytest.raises(\n TypeError, match="dtype category does not support reduction"\n ):\n func(data)\n\n # method version\n with pytest.raises(\n TypeError, match="dtype category does not support reduction"\n ):\n getattr(DataFrame(data), func.__name__)(axis=None)\n else:\n msg = "'(any|all)' with datetime64 dtypes is deprecated"\n if data.dtypes.apply(lambda x: x.kind == "M").any():\n warn = FutureWarning\n 
else:\n warn = None\n\n with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):\n # GH#34479\n result = func(data)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n # method version\n with tm.assert_produces_warning(warn, match=msg):\n # GH#34479\n result = getattr(DataFrame(data), func.__name__)(axis=None)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n def test_any_all_object(self):\n # GH 19976\n result = np.all(DataFrame(columns=["a", "b"])).item()\n assert result is True\n\n result = np.any(DataFrame(columns=["a", "b"])).item()\n assert result is False\n\n def test_any_all_object_bool_only(self):\n df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object)\n df._consolidate_inplace()\n df["C"] = Series([True, True])\n\n # Categorical of bools is _not_ considered booly\n df["D"] = df["C"].astype("category")\n\n # The underlying bug is in DataFrame._get_bool_data, so we check\n # that while we're here\n res = df._get_bool_data()\n expected = df[["C"]]\n tm.assert_frame_equal(res, expected)\n\n res = df.all(bool_only=True, axis=0)\n expected = Series([True], index=["C"])\n tm.assert_series_equal(res, expected)\n\n # operating on a subset of columns should not produce a _larger_ Series\n res = df[["B", "C"]].all(bool_only=True, axis=0)\n tm.assert_series_equal(res, expected)\n\n assert df.all(bool_only=True, axis=None)\n\n res = df.any(bool_only=True, axis=0)\n expected = Series([True], index=["C"])\n tm.assert_series_equal(res, expected)\n\n # operating on a subset of columns should not produce a _larger_ Series\n res = df[["C"]].any(bool_only=True, axis=0)\n tm.assert_series_equal(res, expected)\n\n assert df.any(bool_only=True, axis=None)\n\n # ---------------------------------------------------------------------\n # Unsorted\n\n def test_series_broadcasting(self):\n # smoke test for numpy warnings\n # GH 16378, GH 16306\n df = DataFrame([1.0, 1.0, 1.0])\n df_nan = 
DataFrame({"A": [np.nan, 2.0, np.nan]})\n s = Series([1, 1, 1])\n s_nan = Series([np.nan, np.nan, 1])\n\n with tm.assert_produces_warning(None):\n df_nan.clip(lower=s, axis=0)\n for op in ["lt", "le", "gt", "ge", "eq", "ne"]:\n getattr(df, op)(s_nan, axis=0)\n\n\nclass TestDataFrameReductions:\n def test_min_max_dt64_with_NaT(self):\n # Both NaT and Timestamp are in DataFrame.\n df = DataFrame({"foo": [pd.NaT, pd.NaT, Timestamp("2012-05-01")]})\n\n res = df.min()\n exp = Series([Timestamp("2012-05-01")], index=["foo"])\n tm.assert_series_equal(res, exp)\n\n res = df.max()\n exp = Series([Timestamp("2012-05-01")], index=["foo"])\n tm.assert_series_equal(res, exp)\n\n # GH12941, only NaTs are in DataFrame.\n df = DataFrame({"foo": [pd.NaT, pd.NaT]})\n\n res = df.min()\n exp = Series([pd.NaT], index=["foo"])\n tm.assert_series_equal(res, exp)\n\n res = df.max()\n exp = Series([pd.NaT], index=["foo"])\n tm.assert_series_equal(res, exp)\n\n def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture):\n # GH#36907\n tz = tz_naive_fixture\n if isinstance(tz, tzlocal) and is_platform_windows():\n pytest.skip(\n "GH#37659 OSError raised within tzlocal bc Windows "\n "chokes in times before 1970-01-01"\n )\n\n df = DataFrame(\n {\n "a": [\n Timestamp("2020-01-01 08:00:00", tz=tz),\n Timestamp("1920-02-01 09:00:00", tz=tz),\n ],\n "b": [Timestamp("2020-02-01 08:00:00", tz=tz), pd.NaT],\n }\n )\n res = df.min(axis=1, skipna=False)\n expected = Series([df.loc[0, "a"], pd.NaT])\n assert expected.dtype == df["a"].dtype\n\n tm.assert_series_equal(res, expected)\n\n res = df.max(axis=1, skipna=False)\n expected = Series([df.loc[0, "b"], pd.NaT])\n assert expected.dtype == df["a"].dtype\n\n tm.assert_series_equal(res, expected)\n\n def test_min_max_dt64_api_consistency_with_NaT(self):\n # Calling the following sum functions returned an error for dataframes but\n # returned NaT for series. 
These tests check that the API is consistent in\n # min/max calls on empty Series/DataFrames. See GH:33704 for more\n # information\n df = DataFrame({"x": to_datetime([])})\n expected_dt_series = Series(to_datetime([]))\n # check axis 0\n assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)\n assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT)\n\n # check axis 1\n tm.assert_series_equal(df.min(axis=1), expected_dt_series)\n tm.assert_series_equal(df.max(axis=1), expected_dt_series)\n\n def test_min_max_dt64_api_consistency_empty_df(self):\n # check DataFrame/Series api consistency when calling min/max on an empty\n # DataFrame/Series.\n df = DataFrame({"x": []})\n expected_float_series = Series([], dtype=float)\n # check axis 0\n assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())\n assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max())\n # check axis 1\n tm.assert_series_equal(df.min(axis=1), expected_float_series)\n tm.assert_series_equal(df.min(axis=1), expected_float_series)\n\n @pytest.mark.parametrize(\n "initial",\n ["2018-10-08 13:36:45+00:00", "2018-10-08 13:36:45+03:00"], # Non-UTC timezone\n )\n @pytest.mark.parametrize("method", ["min", "max"])\n def test_preserve_timezone(self, initial: str, method):\n # GH 28552\n initial_dt = to_datetime(initial)\n expected = Series([initial_dt])\n df = DataFrame([expected])\n result = getattr(df, method)(axis=1)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("method", ["min", "max"])\n def test_minmax_tzaware_skipna_axis_1(self, method, skipna):\n # GH#51242\n val = to_datetime("1900-01-01", utc=True)\n df = DataFrame(\n {"a": Series([pd.NaT, pd.NaT, val]), "b": Series([pd.NaT, val, val])}\n )\n op = getattr(df, method)\n result = op(axis=1, skipna=skipna)\n if skipna:\n expected = Series([pd.NaT, val, val])\n else:\n expected = Series([pd.NaT, pd.NaT, val])\n tm.assert_series_equal(result, 
expected)\n\n def test_frame_any_with_timedelta(self):\n # GH#17667\n df = DataFrame(\n {\n "a": Series([0, 0]),\n "t": Series([to_timedelta(0, "s"), to_timedelta(1, "ms")]),\n }\n )\n\n result = df.any(axis=0)\n expected = Series(data=[False, True], index=["a", "t"])\n tm.assert_series_equal(result, expected)\n\n result = df.any(axis=1)\n expected = Series(data=[False, True])\n tm.assert_series_equal(result, expected)\n\n def test_reductions_skipna_none_raises(\n self, request, frame_or_series, all_reductions\n ):\n if all_reductions == "count":\n request.applymarker(\n pytest.mark.xfail(reason="Count does not accept skipna")\n )\n obj = frame_or_series([1, 2, 3])\n msg = 'For argument "skipna" expected type bool, received type NoneType.'\n with pytest.raises(ValueError, match=msg):\n getattr(obj, all_reductions)(skipna=None)\n\n @td.skip_array_manager_invalid_test\n def test_reduction_timestamp_smallest_unit(self):\n # GH#52524\n df = DataFrame(\n {\n "a": Series([Timestamp("2019-12-31")], dtype="datetime64[s]"),\n "b": Series(\n [Timestamp("2019-12-31 00:00:00.123")], dtype="datetime64[ms]"\n ),\n }\n )\n result = df.max()\n expected = Series(\n [Timestamp("2019-12-31"), Timestamp("2019-12-31 00:00:00.123")],\n dtype="datetime64[ms]",\n index=["a", "b"],\n )\n tm.assert_series_equal(result, expected)\n\n @td.skip_array_manager_not_yet_implemented\n def test_reduction_timedelta_smallest_unit(self):\n # GH#52524\n df = DataFrame(\n {\n "a": Series([pd.Timedelta("1 days")], dtype="timedelta64[s]"),\n "b": Series([pd.Timedelta("1 days")], dtype="timedelta64[ms]"),\n }\n )\n result = df.max()\n expected = Series(\n [pd.Timedelta("1 days"), pd.Timedelta("1 days")],\n dtype="timedelta64[ms]",\n index=["a", "b"],\n )\n tm.assert_series_equal(result, expected)\n\n\nclass TestNuisanceColumns:\n @pytest.mark.parametrize("method", ["any", "all"])\n def test_any_all_categorical_dtype_nuisance_column(self, method):\n # GH#36076 DataFrame should match Series behavior\n ser = 
Series([0, 1], dtype="category", name="A")\n df = ser.to_frame()\n\n # Double-check the Series behavior is to raise\n with pytest.raises(TypeError, match="does not support reduction"):\n getattr(ser, method)()\n\n with pytest.raises(TypeError, match="does not support reduction"):\n getattr(np, method)(ser)\n\n with pytest.raises(TypeError, match="does not support reduction"):\n getattr(df, method)(bool_only=False)\n\n with pytest.raises(TypeError, match="does not support reduction"):\n getattr(df, method)(bool_only=None)\n\n with pytest.raises(TypeError, match="does not support reduction"):\n getattr(np, method)(df, axis=0)\n\n def test_median_categorical_dtype_nuisance_column(self):\n # GH#21020 DataFrame.median should match Series.median\n df = DataFrame({"A": Categorical([1, 2, 2, 2, 3])})\n ser = df["A"]\n\n # Double-check the Series behavior is to raise\n with pytest.raises(TypeError, match="does not support reduction"):\n ser.median()\n\n with pytest.raises(TypeError, match="does not support reduction"):\n df.median(numeric_only=False)\n\n with pytest.raises(TypeError, match="does not support reduction"):\n df.median()\n\n # same thing, but with an additional non-categorical column\n df["B"] = df["A"].astype(int)\n\n with pytest.raises(TypeError, match="does not support reduction"):\n df.median(numeric_only=False)\n\n with pytest.raises(TypeError, match="does not support reduction"):\n df.median()\n\n # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) instead\n # of expected.values\n\n @pytest.mark.parametrize("method", ["min", "max"])\n def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method):\n # GH#28949 DataFrame.min should behave like Series.min\n cat = Categorical(["a", "b", "c", "b"], ordered=False)\n ser = Series(cat)\n df = ser.to_frame("A")\n\n # Double-check the Series behavior\n with pytest.raises(TypeError, match="is not ordered for operation"):\n getattr(ser, method)()\n\n with pytest.raises(TypeError, match="is not 
ordered for operation"):\n getattr(np, method)(ser)\n\n with pytest.raises(TypeError, match="is not ordered for operation"):\n getattr(df, method)(numeric_only=False)\n\n with pytest.raises(TypeError, match="is not ordered for operation"):\n getattr(df, method)()\n\n with pytest.raises(TypeError, match="is not ordered for operation"):\n getattr(np, method)(df, axis=0)\n\n # same thing, but with an additional non-categorical column\n df["B"] = df["A"].astype(object)\n with pytest.raises(TypeError, match="is not ordered for operation"):\n getattr(df, method)()\n\n with pytest.raises(TypeError, match="is not ordered for operation"):\n getattr(np, method)(df, axis=0)\n\n\nclass TestEmptyDataFrameReductions:\n @pytest.mark.parametrize(\n "opname, dtype, exp_value, exp_dtype",\n [\n ("sum", np.int8, 0, np.int64),\n ("prod", np.int8, 1, np.int_),\n ("sum", np.int64, 0, np.int64),\n ("prod", np.int64, 1, np.int64),\n ("sum", np.uint8, 0, np.uint64),\n ("prod", np.uint8, 1, np.uint),\n ("sum", np.uint64, 0, np.uint64),\n ("prod", np.uint64, 1, np.uint64),\n ("sum", np.float32, 0, np.float32),\n ("prod", np.float32, 1, np.float32),\n ("sum", np.float64, 0, np.float64),\n ],\n )\n def test_df_empty_min_count_0(self, opname, dtype, exp_value, exp_dtype):\n df = DataFrame({0: [], 1: []}, dtype=dtype)\n result = getattr(df, opname)(min_count=0)\n\n expected = Series([exp_value, exp_value], dtype=exp_dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "opname, dtype, exp_dtype",\n [\n ("sum", np.int8, np.float64),\n ("prod", np.int8, np.float64),\n ("sum", np.int64, np.float64),\n ("prod", np.int64, np.float64),\n ("sum", np.uint8, np.float64),\n ("prod", np.uint8, np.float64),\n ("sum", np.uint64, np.float64),\n ("prod", np.uint64, np.float64),\n ("sum", np.float32, np.float32),\n ("prod", np.float32, np.float32),\n ("sum", np.float64, np.float64),\n ],\n )\n def test_df_empty_min_count_1(self, opname, dtype, exp_dtype):\n df = DataFrame({0: [], 1: 
[]}, dtype=dtype)\n result = getattr(df, opname)(min_count=1)\n\n expected = Series([np.nan, np.nan], dtype=exp_dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "opname, dtype, exp_value, exp_dtype",\n [\n ("sum", "Int8", 0, ("Int32" if is_windows_np2_or_is32 else "Int64")),\n ("prod", "Int8", 1, ("Int32" if is_windows_np2_or_is32 else "Int64")),\n ("prod", "Int8", 1, ("Int32" if is_windows_np2_or_is32 else "Int64")),\n ("sum", "Int64", 0, "Int64"),\n ("prod", "Int64", 1, "Int64"),\n ("sum", "UInt8", 0, ("UInt32" if is_windows_np2_or_is32 else "UInt64")),\n ("prod", "UInt8", 1, ("UInt32" if is_windows_np2_or_is32 else "UInt64")),\n ("sum", "UInt64", 0, "UInt64"),\n ("prod", "UInt64", 1, "UInt64"),\n ("sum", "Float32", 0, "Float32"),\n ("prod", "Float32", 1, "Float32"),\n ("sum", "Float64", 0, "Float64"),\n ],\n )\n def test_df_empty_nullable_min_count_0(self, opname, dtype, exp_value, exp_dtype):\n df = DataFrame({0: [], 1: []}, dtype=dtype)\n result = getattr(df, opname)(min_count=0)\n\n expected = Series([exp_value, exp_value], dtype=exp_dtype)\n tm.assert_series_equal(result, expected)\n\n # TODO: why does min_count=1 impact the resulting Windows dtype\n # differently than min_count=0?\n @pytest.mark.parametrize(\n "opname, dtype, exp_dtype",\n [\n ("sum", "Int8", ("Int32" if is_windows_or_is32 else "Int64")),\n ("prod", "Int8", ("Int32" if is_windows_or_is32 else "Int64")),\n ("sum", "Int64", "Int64"),\n ("prod", "Int64", "Int64"),\n ("sum", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")),\n ("prod", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")),\n ("sum", "UInt64", "UInt64"),\n ("prod", "UInt64", "UInt64"),\n ("sum", "Float32", "Float32"),\n ("prod", "Float32", "Float32"),\n ("sum", "Float64", "Float64"),\n ],\n )\n def test_df_empty_nullable_min_count_1(self, opname, dtype, exp_dtype):\n df = DataFrame({0: [], 1: []}, dtype=dtype)\n result = getattr(df, opname)(min_count=1)\n\n expected = Series([pd.NA, 
pd.NA], dtype=exp_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_sum_timedelta64_skipna_false(using_array_manager, request):\n # GH#17235\n if using_array_manager:\n mark = pytest.mark.xfail(\n reason="Incorrect type inference on NaT in reduction result"\n )\n request.applymarker(mark)\n\n arr = np.arange(8).astype(np.int64).view("m8[s]").reshape(4, 2)\n arr[-1, -1] = "Nat"\n\n df = DataFrame(arr)\n assert (df.dtypes == arr.dtype).all()\n\n result = df.sum(skipna=False)\n expected = Series([pd.Timedelta(seconds=12), pd.NaT], dtype="m8[s]")\n tm.assert_series_equal(result, expected)\n\n result = df.sum(axis=0, skipna=False)\n tm.assert_series_equal(result, expected)\n\n result = df.sum(axis=1, skipna=False)\n expected = Series(\n [\n pd.Timedelta(seconds=1),\n pd.Timedelta(seconds=5),\n pd.Timedelta(seconds=9),\n pd.NaT,\n ],\n dtype="m8[s]",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_mixed_frame_with_integer_sum():\n # https://github.com/pandas-dev/pandas/issues/34520\n df = DataFrame([["a", 1]], columns=list("ab"))\n df = df.astype({"b": "Int64"})\n result = df.sum()\n expected = Series(["a", 1], index=["a", "b"])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("numeric_only", [True, False, None])\n@pytest.mark.parametrize("method", ["min", "max"])\ndef test_minmax_extensionarray(method, numeric_only):\n # https://github.com/pandas-dev/pandas/issues/32651\n int64_info = np.iinfo("int64")\n ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype())\n df = DataFrame({"Int64": ser})\n result = getattr(df, method)(numeric_only=numeric_only)\n expected = Series(\n [getattr(int64_info, method)],\n dtype="Int64",\n index=Index(["Int64"]),\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("ts_value", [Timestamp("2000-01-01"), pd.NaT])\ndef test_frame_mixed_numeric_object_with_timestamp(ts_value):\n # GH 13912\n df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": 
[ts_value]})\n with pytest.raises(\n TypeError, match="does not support (operation|reduction)|Cannot perform"\n ):\n df.sum()\n\n\ndef test_prod_sum_min_count_mixed_object():\n # https://github.com/pandas-dev/pandas/issues/41074\n df = DataFrame([1, "a", True])\n\n result = df.prod(axis=0, min_count=1, numeric_only=False)\n expected = Series(["a"], dtype=object)\n tm.assert_series_equal(result, expected)\n\n msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'")\n with pytest.raises(TypeError, match=msg):\n df.sum(axis=0, min_count=1, numeric_only=False)\n\n\n@pytest.mark.parametrize("method", ["min", "max", "mean", "median", "skew", "kurt"])\n@pytest.mark.parametrize("numeric_only", [True, False])\n@pytest.mark.parametrize("dtype", ["float64", "Float64"])\ndef test_reduction_axis_none_returns_scalar(method, numeric_only, dtype):\n # GH#21597 As of 2.0, axis=None reduces over all axes.\n\n df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), dtype=dtype)\n\n result = getattr(df, method)(axis=None, numeric_only=numeric_only)\n np_arr = df.to_numpy(dtype=np.float64)\n if method in {"skew", "kurt"}:\n comp_mod = pytest.importorskip("scipy.stats")\n if method == "kurt":\n method = "kurtosis"\n expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None)\n tm.assert_almost_equal(result, expected)\n else:\n expected = getattr(np, method)(np_arr, axis=None)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "kernel",\n [\n "corr",\n "corrwith",\n "cov",\n "idxmax",\n "idxmin",\n "kurt",\n "max",\n "mean",\n "median",\n "min",\n "prod",\n "quantile",\n "sem",\n "skew",\n "std",\n "sum",\n "var",\n ],\n)\ndef test_fails_on_non_numeric(kernel):\n # GH#46852\n df = DataFrame({"a": [1, 2, 3], "b": object})\n args = (df,) if kernel == "corrwith" else ()\n msg = "|".join(\n [\n "not allowed for this dtype",\n "argument must be a string or a number",\n "not supported between instances of",\n "unsupported operand type",\n "argument must 
be a string or a real number",\n ]\n )\n if kernel == "median":\n # slightly different message on different builds\n msg1 = (\n r"Cannot convert \[\[<class 'object'> <class 'object'> "\n r"<class 'object'>\]\] to numeric"\n )\n msg2 = (\n r"Cannot convert \[<class 'object'> <class 'object'> "\n r"<class 'object'>\] to numeric"\n )\n msg = "|".join([msg1, msg2])\n with pytest.raises(TypeError, match=msg):\n getattr(df, kernel)(*args)\n\n\n@pytest.mark.parametrize(\n "method",\n [\n "all",\n "any",\n "count",\n "idxmax",\n "idxmin",\n "kurt",\n "kurtosis",\n "max",\n "mean",\n "median",\n "min",\n "nunique",\n "prod",\n "product",\n "sem",\n "skew",\n "std",\n "sum",\n "var",\n ],\n)\n@pytest.mark.parametrize("min_count", [0, 2])\ndef test_numeric_ea_axis_1(method, skipna, min_count, any_numeric_ea_dtype):\n # GH 54341\n df = DataFrame(\n {\n "a": Series([0, 1, 2, 3], dtype=any_numeric_ea_dtype),\n "b": Series([0, 1, pd.NA, 3], dtype=any_numeric_ea_dtype),\n },\n )\n expected_df = DataFrame(\n {\n "a": [0.0, 1.0, 2.0, 3.0],\n "b": [0.0, 1.0, np.nan, 3.0],\n },\n )\n if method in ("count", "nunique"):\n expected_dtype = "int64"\n elif method in ("all", "any"):\n expected_dtype = "boolean"\n elif method in (\n "kurt",\n "kurtosis",\n "mean",\n "median",\n "sem",\n "skew",\n "std",\n "var",\n ) and not any_numeric_ea_dtype.startswith("Float"):\n expected_dtype = "Float64"\n else:\n expected_dtype = any_numeric_ea_dtype\n\n kwargs = {}\n if method not in ("count", "nunique", "quantile"):\n kwargs["skipna"] = skipna\n if method in ("prod", "product", "sum"):\n kwargs["min_count"] = min_count\n\n warn = None\n msg = None\n if not skipna and method in ("idxmax", "idxmin"):\n warn = FutureWarning\n msg = f"The behavior of DataFrame.{method} with all-NA values"\n with tm.assert_produces_warning(warn, match=msg):\n result = getattr(df, method)(axis=1, **kwargs)\n with tm.assert_produces_warning(warn, match=msg):\n expected = getattr(expected_df, method)(axis=1, **kwargs)\n if 
method not in ("idxmax", "idxmin"):\n expected = expected.astype(expected_dtype)\n tm.assert_series_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_reductions.py
test_reductions.py
Python
76,039
0.75
0.099388
0.075873
awesome-app
759
2024-08-20T17:46:33.340989
BSD-3-Clause
true
ca8996745c27f91073332a23482baa96
from datetime import (\n datetime,\n timedelta,\n)\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n Categorical,\n CategoricalIndex,\n DataFrame,\n IntervalIndex,\n MultiIndex,\n NaT,\n PeriodIndex,\n Series,\n Timestamp,\n date_range,\n option_context,\n period_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameRepr:\n def test_repr_should_return_str(self):\n # https://docs.python.org/3/reference/datamodel.html#object.__repr__\n # "...The return value must be a string object."\n\n # (str on py2.x, str (unicode) on py3)\n\n data = [8, 5, 3, 5]\n index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]\n cols = ["\u03c8"]\n df = DataFrame(data, columns=cols, index=index1)\n assert type(df.__repr__()) is str # noqa: E721\n\n ser = df[cols[0]]\n assert type(ser.__repr__()) is str # noqa: E721\n\n def test_repr_bytes_61_lines(self):\n # GH#12857\n lets = list("ACDEFGHIJKLMNOP")\n words = np.random.default_rng(2).choice(lets, (1000, 50))\n df = DataFrame(words).astype("U1")\n assert (df.dtypes == object).all()\n\n # smoke tests; at one point this raised with 61 but not 60\n repr(df)\n repr(df.iloc[:60, :])\n repr(df.iloc[:61, :])\n\n def test_repr_unicode_level_names(self, frame_or_series):\n index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"])\n\n obj = DataFrame(np.random.default_rng(2).standard_normal((2, 4)), index=index)\n obj = tm.get_obj(obj, frame_or_series)\n repr(obj)\n\n def test_assign_index_sequences(self):\n # GH#2200\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index(\n ["a", "b"]\n )\n index = list(df.index)\n index[0] = ("faz", "boo")\n df.index = index\n repr(df)\n\n # this travels an improper code path\n index[0] = ["faz", "boo"]\n df.index = index\n repr(df)\n\n def test_repr_with_mi_nat(self):\n df = DataFrame({"X": [1, 2]}, index=[[NaT, Timestamp("20130101")], ["a", "b"]])\n result = repr(df)\n expected = " X\nNaT a 1\n2013-01-01 b 2"\n assert result == 
expected\n\n def test_repr_with_different_nulls(self):\n # GH45263\n df = DataFrame([1, 2, 3, 4], [True, None, np.nan, NaT])\n result = repr(df)\n expected = """ 0\nTrue 1\nNone 2\nNaN 3\nNaT 4"""\n assert result == expected\n\n def test_repr_with_different_nulls_cols(self):\n # GH45263\n d = {np.nan: [1, 2], None: [3, 4], NaT: [6, 7], True: [8, 9]}\n df = DataFrame(data=d)\n result = repr(df)\n expected = """ NaN None NaT True\n0 1 3 6 8\n1 2 4 7 9"""\n assert result == expected\n\n def test_multiindex_na_repr(self):\n # only an issue with long columns\n df3 = DataFrame(\n {\n "A" * 30: {("A", "A0006000", "nuit"): "A0006000"},\n "B" * 30: {("A", "A0006000", "nuit"): np.nan},\n "C" * 30: {("A", "A0006000", "nuit"): np.nan},\n "D" * 30: {("A", "A0006000", "nuit"): np.nan},\n "E" * 30: {("A", "A0006000", "nuit"): "A"},\n "F" * 30: {("A", "A0006000", "nuit"): np.nan},\n }\n )\n\n idf = df3.set_index(["A" * 30, "C" * 30])\n repr(idf)\n\n def test_repr_name_coincide(self):\n index = MultiIndex.from_tuples(\n [("a", 0, "foo"), ("b", 1, "bar")], names=["a", "b", "c"]\n )\n\n df = DataFrame({"value": [0, 1]}, index=index)\n\n lines = repr(df).split("\n")\n assert lines[2].startswith("a 0 foo")\n\n def test_repr_to_string(\n self,\n multiindex_year_month_day_dataframe_random_data,\n multiindex_dataframe_random_data,\n ):\n ymd = multiindex_year_month_day_dataframe_random_data\n frame = multiindex_dataframe_random_data\n\n repr(frame)\n repr(ymd)\n repr(frame.T)\n repr(ymd.T)\n\n buf = StringIO()\n frame.to_string(buf=buf)\n ymd.to_string(buf=buf)\n frame.T.to_string(buf=buf)\n ymd.T.to_string(buf=buf)\n\n def test_repr_empty(self):\n # empty\n repr(DataFrame())\n\n # empty with index\n frame = DataFrame(index=np.arange(1000))\n repr(frame)\n\n def test_repr_mixed(self, float_string_frame):\n # mixed\n repr(float_string_frame)\n\n @pytest.mark.slow\n def test_repr_mixed_big(self):\n # big mixed\n biggie = DataFrame(\n {\n "A": np.random.default_rng(2).standard_normal(200),\n 
"B": [str(i) for i in range(200)],\n },\n index=range(200),\n )\n biggie.loc[:20, "A"] = np.nan\n biggie.loc[:20, "B"] = np.nan\n\n repr(biggie)\n\n def test_repr(self):\n # columns but no index\n no_index = DataFrame(columns=[0, 1, 3])\n repr(no_index)\n\n df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])\n assert "\t" not in repr(df)\n assert "\r" not in repr(df)\n assert "a\n" not in repr(df)\n\n def test_repr_dimensions(self):\n df = DataFrame([[1, 2], [3, 4]])\n with option_context("display.show_dimensions", True):\n assert "2 rows x 2 columns" in repr(df)\n\n with option_context("display.show_dimensions", False):\n assert "2 rows x 2 columns" not in repr(df)\n\n with option_context("display.show_dimensions", "truncate"):\n assert "2 rows x 2 columns" not in repr(df)\n\n @pytest.mark.slow\n def test_repr_big(self):\n # big one\n biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200))\n repr(biggie)\n\n def test_repr_unsortable(self):\n # columns are not sortable\n\n unsortable = DataFrame(\n {\n "foo": [1] * 50,\n datetime.today(): [1] * 50,\n "bar": ["bar"] * 50,\n datetime.today() + timedelta(1): ["bar"] * 50,\n },\n index=np.arange(50),\n )\n repr(unsortable)\n\n def test_repr_float_frame_options(self, float_frame):\n repr(float_frame)\n\n with option_context("display.precision", 3):\n repr(float_frame)\n\n with option_context("display.max_rows", 10, "display.max_columns", 2):\n repr(float_frame)\n\n with option_context("display.max_rows", 1000, "display.max_columns", 1000):\n repr(float_frame)\n\n def test_repr_unicode(self):\n uval = "\u03c3\u03c3\u03c3\u03c3"\n\n df = DataFrame({"A": [uval, uval]})\n\n result = repr(df)\n ex_top = " A"\n assert result.split("\n")[0].rstrip() == ex_top\n\n df = DataFrame({"A": [uval, uval]})\n result = repr(df)\n assert result.split("\n")[0].rstrip() == ex_top\n\n def test_unicode_string_with_unicode(self):\n df = DataFrame({"A": ["\u05d0"]})\n str(df)\n\n def 
test_repr_unicode_columns(self):\n df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})\n repr(df.columns) # should not raise UnicodeDecodeError\n\n def test_str_to_bytes_raises(self):\n # GH 26447\n df = DataFrame({"A": ["abc"]})\n msg = "^'str' object cannot be interpreted as an integer$"\n with pytest.raises(TypeError, match=msg):\n bytes(df)\n\n def test_very_wide_repr(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 20)),\n columns=np.array(["a" * 10] * 20, dtype=object),\n )\n repr(df)\n\n def test_repr_column_name_unicode_truncation_bug(self):\n # #1906\n df = DataFrame(\n {\n "Id": [7117434],\n "StringCol": (\n "Is it possible to modify drop plot code"\n "so that the output graph is displayed "\n "in iphone simulator, Is it possible to "\n "modify drop plot code so that the "\n "output graph is \xe2\x80\xa8displayed "\n "in iphone simulator.Now we are adding "\n "the CSV file externally. I want to Call "\n "the File through the code.."\n ),\n }\n )\n\n with option_context("display.max_columns", 20):\n assert "StringCol" in repr(df)\n\n def test_latex_repr(self):\n pytest.importorskip("jinja2")\n expected = r"""\begin{tabular}{llll}\n\toprule\n & 0 & 1 & 2 \\\n\midrule\n0 & $\alpha$ & b & c \\\n1 & 1 & 2 & 3 \\\n\bottomrule\n\end{tabular}\n"""\n with option_context(\n "styler.format.escape", None, "styler.render.repr", "latex"\n ):\n df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]])\n result = df._repr_latex_()\n assert result == expected\n\n # GH 12182\n assert df._repr_latex_() is None\n\n def test_repr_with_datetimeindex(self):\n df = DataFrame({"A": [1, 2, 3]}, index=date_range("2000", periods=3))\n result = repr(df)\n expected = " A\n2000-01-01 1\n2000-01-02 2\n2000-01-03 3"\n assert result == expected\n\n def test_repr_with_intervalindex(self):\n # https://github.com/pandas-dev/pandas/pull/24134/files\n df = DataFrame(\n {"A": [1, 2, 3, 4]}, index=IntervalIndex.from_breaks([0, 1, 2, 3, 4])\n )\n result = 
repr(df)\n expected = " A\n(0, 1] 1\n(1, 2] 2\n(2, 3] 3\n(3, 4] 4"\n assert result == expected\n\n def test_repr_with_categorical_index(self):\n df = DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"]))\n result = repr(df)\n expected = " A\na 1\nb 2\nc 3"\n assert result == expected\n\n def test_repr_categorical_dates_periods(self):\n # normal DataFrame\n dt = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")\n p = period_range("2011-01", freq="M", periods=5)\n df = DataFrame({"dt": dt, "p": p})\n exp = """ dt p\n0 2011-01-01 09:00:00-05:00 2011-01\n1 2011-01-01 10:00:00-05:00 2011-02\n2 2011-01-01 11:00:00-05:00 2011-03\n3 2011-01-01 12:00:00-05:00 2011-04\n4 2011-01-01 13:00:00-05:00 2011-05"""\n\n assert repr(df) == exp\n\n df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)})\n assert repr(df2) == exp\n\n @pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64])\n @pytest.mark.parametrize(\n "box, expected",\n [[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]],\n )\n def test_repr_np_nat_with_object(self, arg, box, expected):\n # GH 25445\n result = repr(box([arg("NaT")], dtype=object))\n assert result == expected\n\n def test_frame_datetime64_pre1900_repr(self):\n df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="YE-DEC")})\n # it works!\n repr(df)\n\n def test_frame_to_string_with_periodindex(self):\n index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")\n frame = DataFrame(np.random.default_rng(2).standard_normal((3, 4)), index=index)\n\n # it works!\n frame.to_string()\n\n def test_to_string_ea_na_in_multiindex(self):\n # GH#47986\n df = DataFrame(\n {"a": [1, 2]},\n index=MultiIndex.from_arrays([Series([NA, 1], dtype="Int64")]),\n )\n\n result = df.to_string()\n expected = """ a\n<NA> 1\n1 2"""\n assert result == expected\n\n def test_datetime64tz_slice_non_truncate(self):\n # GH 30263\n df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")})\n expected = repr(df)\n 
df = df.iloc[:, :5]\n result = repr(df)\n assert result == expected\n\n def test_to_records_no_typeerror_in_repr(self):\n # GH 48526\n df = DataFrame([["a", "b"], ["c", "d"], ["e", "f"]], columns=["left", "right"])\n df["record"] = df[["left", "right"]].to_records()\n expected = """ left right record\n0 a b [0, a, b]\n1 c d [1, c, d]\n2 e f [2, e, f]"""\n result = repr(df)\n assert result == expected\n\n def test_to_records_with_na_record_value(self):\n # GH 48526\n df = DataFrame(\n [["a", np.nan], ["c", "d"], ["e", "f"]], columns=["left", "right"]\n )\n df["record"] = df[["left", "right"]].to_records()\n expected = """ left right record\n0 a NaN [0, a, nan]\n1 c d [1, c, d]\n2 e f [2, e, f]"""\n result = repr(df)\n assert result == expected\n\n def test_to_records_with_na_record(self):\n # GH 48526\n df = DataFrame(\n [["a", "b"], [np.nan, np.nan], ["e", "f"]], columns=[np.nan, "right"]\n )\n df["record"] = df[[np.nan, "right"]].to_records()\n expected = """ NaN right record\n0 a b [0, a, b]\n1 NaN NaN [1, nan, nan]\n2 e f [2, e, f]"""\n result = repr(df)\n assert result == expected\n\n def test_to_records_with_inf_as_na_record(self):\n # GH 48526\n expected = """ NaN inf record\n0 inf b [0, inf, b]\n1 NaN NaN [1, nan, nan]\n2 e f [2, e, f]"""\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with option_context("use_inf_as_na", True):\n df = DataFrame(\n [[np.inf, "b"], [np.nan, np.nan], ["e", "f"]],\n columns=[np.nan, np.inf],\n )\n df["record"] = df[[np.nan, np.inf]].to_records()\n result = repr(df)\n assert result == expected\n\n def test_to_records_with_inf_record(self):\n # GH 48526\n expected = """ NaN inf record\n0 inf b [0, inf, b]\n1 NaN NaN [1, nan, nan]\n2 e f [2, e, f]"""\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with option_context("use_inf_as_na", False):\n df = DataFrame(\n [[np.inf, "b"], [np.nan, np.nan], ["e", "f"]],\n 
columns=[np.nan, np.inf],\n )\n df["record"] = df[[np.nan, np.inf]].to_records()\n result = repr(df)\n assert result == expected\n\n def test_masked_ea_with_formatter(self):\n # GH#39336\n df = DataFrame(\n {\n "a": Series([0.123456789, 1.123456789], dtype="Float64"),\n "b": Series([1, 2], dtype="Int64"),\n }\n )\n result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format])\n expected = """ a b\n0 0.12 1.00\n1 1.12 2.00"""\n assert result == expected\n\n def test_repr_ea_columns(self, any_string_dtype):\n # GH#54797\n pytest.importorskip("pyarrow")\n df = DataFrame({"long_column_name": [1, 2, 3], "col2": [4, 5, 6]})\n df.columns = df.columns.astype(any_string_dtype)\n expected = """ long_column_name col2\n0 1 4\n1 2 5\n2 3 6"""\n assert repr(df) == expected\n\n\n@pytest.mark.parametrize(\n "data,output",\n [\n ([2, complex("nan"), 1], [" 2.0+0.0j", " NaN+0.0j", " 1.0+0.0j"]),\n ([2, complex("nan"), -1], [" 2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]),\n ([-2, complex("nan"), -1], ["-2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]),\n ([-1.23j, complex("nan"), -1], ["-0.00-1.23j", " NaN+0.00j", "-1.00+0.00j"]),\n ([1.23j, complex("nan"), 1.23], [" 0.00+1.23j", " NaN+0.00j", " 1.23+0.00j"]),\n (\n [-1.23j, complex(np.nan, np.nan), 1],\n ["-0.00-1.23j", " NaN+ NaNj", " 1.00+0.00j"],\n ),\n (\n [-1.23j, complex(1.2, np.nan), 1],\n ["-0.00-1.23j", " 1.20+ NaNj", " 1.00+0.00j"],\n ),\n (\n [-1.23j, complex(np.nan, -1.2), 1],\n ["-0.00-1.23j", " NaN-1.20j", " 1.00+0.00j"],\n ),\n ],\n)\n@pytest.mark.parametrize("as_frame", [True, False])\ndef test_repr_with_complex_nans(data, output, as_frame):\n # GH#53762, GH#53841\n obj = Series(np.array(data))\n if as_frame:\n obj = obj.to_frame(name="val")\n reprs = [f"{i} {val}" for i, val in enumerate(output)]\n expected = f"{'val': >{len(reprs[0])}}\n" + "\n".join(reprs)\n else:\n reprs = [f"{i} {val}" for i, val in enumerate(output)]\n expected = "\n".join(reprs) + "\ndtype: complex128"\n assert str(obj) == expected, 
f"\n{str(obj)}\n\n{expected}"\n
.venv\Lib\site-packages\pandas\tests\frame\test_repr.py
test_repr.py
Python
16,818
0.95
0.090734
0.079007
react-lib
459
2025-06-29T09:27:57.558171
MIT
true
147c06cd7ca0e290445c8b625d560fab
from datetime import datetime\nimport itertools\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import lib\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Period,\n Series,\n Timedelta,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.reshape import reshape as reshape_lib\n\n\n@pytest.fixture(params=[True, False])\ndef future_stack(request):\n return request.param\n\n\nclass TestDataFrameReshape:\n def test_stack_unstack(self, float_frame, future_stack):\n df = float_frame.copy()\n df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)\n\n stacked = df.stack(future_stack=future_stack)\n stacked_df = DataFrame({"foo": stacked, "bar": stacked})\n\n unstacked = stacked.unstack()\n unstacked_df = stacked_df.unstack()\n\n tm.assert_frame_equal(unstacked, df)\n tm.assert_frame_equal(unstacked_df["bar"], df)\n\n unstacked_cols = stacked.unstack(0)\n unstacked_cols_df = stacked_df.unstack(0)\n tm.assert_frame_equal(unstacked_cols.T, df)\n tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_mixed_level(self, future_stack):\n # GH 18310\n levels = [range(3), [3, "a", "b"], [1, 2]]\n\n # flat columns:\n df = DataFrame(1, index=levels[0], columns=levels[1])\n result = df.stack(future_stack=future_stack)\n expected = Series(1, index=MultiIndex.from_product(levels[:2]))\n tm.assert_series_equal(result, expected)\n\n # MultiIndex columns:\n df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))\n result = df.stack(1, future_stack=future_stack)\n expected = DataFrame(\n 1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]\n )\n tm.assert_frame_equal(result, expected)\n\n # as above, but used labels in level are actually of homogeneous type\n result = df[["a", "b"]].stack(1, future_stack=future_stack)\n 
expected = expected[["a", "b"]]\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_not_consolidated(self, using_array_manager):\n # Gh#34708\n df = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]})\n df2 = df[["x"]]\n df2["y"] = df["y"]\n if not using_array_manager:\n assert len(df2._mgr.blocks) == 2\n\n res = df2.unstack()\n expected = df.unstack()\n tm.assert_series_equal(res, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_unstack_fill(self, future_stack):\n # GH #9746: fill_value keyword argument for Series\n # and DataFrame unstack\n\n # From a series\n data = Series([1, 2, 4, 5], dtype=np.int16)\n data.index = MultiIndex.from_tuples(\n [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]\n )\n\n result = data.unstack(fill_value=-1)\n expected = DataFrame(\n {"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16\n )\n tm.assert_frame_equal(result, expected)\n\n # From a series with incorrect data type for fill_value\n result = data.unstack(fill_value=0.5)\n expected = DataFrame(\n {"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float\n )\n tm.assert_frame_equal(result, expected)\n\n # GH #13971: fill_value when unstacking multiple levels:\n df = DataFrame(\n {"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}\n ).set_index(["x", "y", "z"])\n unstacked = df.unstack(["x", "y"], fill_value=0)\n key = ("w", "b", "j")\n expected = unstacked[key]\n result = Series([0, 0, 2], index=unstacked.index, name=key)\n tm.assert_series_equal(result, expected)\n\n stacked = unstacked.stack(["x", "y"], future_stack=future_stack)\n stacked.index = stacked.index.reorder_levels(df.index.names)\n # Workaround for GH #17886 (unnecessarily casts to float):\n stacked = stacked.astype(np.int64)\n result = stacked.loc[df.index]\n tm.assert_frame_equal(result, df)\n\n # From a series\n s = df["w"]\n result = s.unstack(["x", "y"], 
fill_value=0)\n expected = unstacked["w"]\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_fill_frame(self):\n # From a dataframe\n rows = [[1, 2], [3, 4], [5, 6], [7, 8]]\n df = DataFrame(rows, columns=list("AB"), dtype=np.int32)\n df.index = MultiIndex.from_tuples(\n [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]\n )\n\n result = df.unstack(fill_value=-1)\n\n rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]\n expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)\n expected.columns = MultiIndex.from_tuples(\n [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]\n )\n tm.assert_frame_equal(result, expected)\n\n # From a mixed type dataframe\n df["A"] = df["A"].astype(np.int16)\n df["B"] = df["B"].astype(np.float64)\n\n result = df.unstack(fill_value=-1)\n expected["A"] = expected["A"].astype(np.int16)\n expected["B"] = expected["B"].astype(np.float64)\n tm.assert_frame_equal(result, expected)\n\n # From a dataframe with incorrect data type for fill_value\n result = df.unstack(fill_value=0.5)\n\n rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]\n expected = DataFrame(rows, index=list("xyz"), dtype=float)\n expected.columns = MultiIndex.from_tuples(\n [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_fill_frame_datetime(self):\n # Test unstacking with date times\n dv = date_range("2012-01-01", periods=4).values\n data = Series(dv)\n data.index = MultiIndex.from_tuples(\n [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]\n )\n\n result = data.unstack()\n expected = DataFrame(\n {"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},\n index=["x", "y", "z"],\n )\n tm.assert_frame_equal(result, expected)\n\n result = data.unstack(fill_value=dv[0])\n expected = DataFrame(\n {"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},\n index=["x", "y", "z"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_fill_frame_timedelta(self):\n # Test unstacking 
with time deltas\n td = [Timedelta(days=i) for i in range(4)]\n data = Series(td)\n data.index = MultiIndex.from_tuples(\n [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]\n )\n\n result = data.unstack()\n expected = DataFrame(\n {"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},\n index=["x", "y", "z"],\n )\n tm.assert_frame_equal(result, expected)\n\n result = data.unstack(fill_value=td[1])\n expected = DataFrame(\n {"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},\n index=["x", "y", "z"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_fill_frame_period(self):\n # Test unstacking with period\n periods = [\n Period("2012-01"),\n Period("2012-02"),\n Period("2012-03"),\n Period("2012-04"),\n ]\n data = Series(periods)\n data.index = MultiIndex.from_tuples(\n [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]\n )\n\n result = data.unstack()\n expected = DataFrame(\n {"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},\n index=["x", "y", "z"],\n )\n tm.assert_frame_equal(result, expected)\n\n result = data.unstack(fill_value=periods[1])\n expected = DataFrame(\n {\n "a": [periods[0], periods[1], periods[3]],\n "b": [periods[1], periods[2], periods[1]],\n },\n index=["x", "y", "z"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_fill_frame_categorical(self):\n # Test unstacking with categorical\n data = Series(["a", "b", "c", "a"], dtype="category")\n data.index = MultiIndex.from_tuples(\n [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]\n )\n\n # By default missing values will be NaN\n result = data.unstack()\n expected = DataFrame(\n {\n "a": pd.Categorical(list("axa"), categories=list("abc")),\n "b": pd.Categorical(list("bcx"), categories=list("abc")),\n },\n index=list("xyz"),\n )\n tm.assert_frame_equal(result, expected)\n\n # Fill with non-category results in a ValueError\n msg = r"Cannot setitem on a Categorical with a new category \(d\)"\n with pytest.raises(TypeError, match=msg):\n 
data.unstack(fill_value="d")\n\n # Fill with category value replaces missing values as expected\n result = data.unstack(fill_value="c")\n expected = DataFrame(\n {\n "a": pd.Categorical(list("aca"), categories=list("abc")),\n "b": pd.Categorical(list("bcc"), categories=list("abc")),\n },\n index=list("xyz"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_tuplename_in_multiindex(self):\n # GH 19966\n idx = MultiIndex.from_product(\n [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]\n )\n df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)\n result = df.unstack(("A", "a"))\n\n expected = DataFrame(\n [[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],\n columns=MultiIndex.from_tuples(\n [\n ("d", "a"),\n ("d", "b"),\n ("d", "c"),\n ("e", "a"),\n ("e", "b"),\n ("e", "c"),\n ],\n names=[None, ("A", "a")],\n ),\n index=Index([1, 2, 3], name=("B", "b")),\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "unstack_idx, expected_values, expected_index, expected_columns",\n [\n (\n ("A", "a"),\n [[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],\n MultiIndex.from_tuples(\n [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]\n ),\n MultiIndex.from_tuples(\n [("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],\n names=[None, ("A", "a")],\n ),\n ),\n (\n (("A", "a"), "B"),\n [[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],\n Index([3, 4], name="C"),\n MultiIndex.from_tuples(\n [\n ("d", "a", 1),\n ("d", "a", 2),\n ("d", "b", 1),\n ("d", "b", 2),\n ("e", "a", 1),\n ("e", "a", 2),\n ("e", "b", 1),\n ("e", "b", 2),\n ],\n names=[None, ("A", "a"), "B"],\n ),\n ),\n ],\n )\n def test_unstack_mixed_type_name_in_multiindex(\n self, unstack_idx, expected_values, expected_index, expected_columns\n ):\n # GH 19966\n idx = MultiIndex.from_product(\n [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]\n )\n df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)\n result = df.unstack(unstack_idx)\n\n 
expected = DataFrame(\n expected_values, columns=expected_columns, index=expected_index\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_preserve_dtypes(self):\n # Checks fix for #11847\n df = DataFrame(\n {\n "state": ["IL", "MI", "NC"],\n "index": ["a", "b", "c"],\n "some_categories": Series(["a", "b", "c"]).astype("category"),\n "A": np.random.default_rng(2).random(3),\n "B": 1,\n "C": "foo",\n "D": pd.Timestamp("20010102"),\n "E": Series([1.0, 50.0, 100.0]).astype("float32"),\n "F": Series([3.0, 4.0, 5.0]).astype("float64"),\n "G": False,\n "H": Series([1, 200, 923442]).astype("int8"),\n }\n )\n\n def unstack_and_compare(df, column_name):\n unstacked1 = df.unstack([column_name])\n unstacked2 = df.unstack(column_name)\n tm.assert_frame_equal(unstacked1, unstacked2)\n\n df1 = df.set_index(["state", "index"])\n unstack_and_compare(df1, "index")\n\n df1 = df.set_index(["state", "some_categories"])\n unstack_and_compare(df1, "some_categories")\n\n df1 = df.set_index(["F", "C"])\n unstack_and_compare(df1, "F")\n\n df1 = df.set_index(["G", "B", "state"])\n unstack_and_compare(df1, "B")\n\n df1 = df.set_index(["E", "A"])\n unstack_and_compare(df1, "E")\n\n df1 = df.set_index(["state", "index"])\n s = df1["A"]\n unstack_and_compare(s, "index")\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_ints(self, future_stack):\n columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))\n df = DataFrame(\n np.random.default_rng(2).standard_normal((30, 27)), columns=columns\n )\n\n tm.assert_frame_equal(\n df.stack(level=[1, 2], future_stack=future_stack),\n df.stack(level=1, future_stack=future_stack).stack(\n level=1, future_stack=future_stack\n ),\n )\n tm.assert_frame_equal(\n df.stack(level=[-2, -1], future_stack=future_stack),\n df.stack(level=1, future_stack=future_stack).stack(\n level=1, future_stack=future_stack\n ),\n )\n\n df_named = df.copy()\n return_value = 
df_named.columns.set_names(range(3), inplace=True)\n assert return_value is None\n\n tm.assert_frame_equal(\n df_named.stack(level=[1, 2], future_stack=future_stack),\n df_named.stack(level=1, future_stack=future_stack).stack(\n level=1, future_stack=future_stack\n ),\n )\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_mixed_levels(self, future_stack):\n columns = MultiIndex.from_tuples(\n [\n ("A", "cat", "long"),\n ("B", "cat", "long"),\n ("A", "dog", "short"),\n ("B", "dog", "short"),\n ],\n names=["exp", "animal", "hair_length"],\n )\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 4)), columns=columns\n )\n\n animal_hair_stacked = df.stack(\n level=["animal", "hair_length"], future_stack=future_stack\n )\n exp_hair_stacked = df.stack(\n level=["exp", "hair_length"], future_stack=future_stack\n )\n\n # GH #8584: Need to check that stacking works when a number\n # is passed that is both a level name and in the range of\n # the level numbers\n df2 = df.copy()\n df2.columns.names = ["exp", "animal", 1]\n tm.assert_frame_equal(\n df2.stack(level=["animal", 1], future_stack=future_stack),\n animal_hair_stacked,\n check_names=False,\n )\n tm.assert_frame_equal(\n df2.stack(level=["exp", 1], future_stack=future_stack),\n exp_hair_stacked,\n check_names=False,\n )\n\n # When mixed types are passed and the ints are not level\n # names, raise\n msg = (\n "level should contain all level names or all level numbers, not "\n "a mixture of the two"\n )\n with pytest.raises(ValueError, match=msg):\n df2.stack(level=["animal", 0], future_stack=future_stack)\n\n # GH #8584: Having 0 in the level names could raise a\n # strange error about lexsort depth\n df3 = df.copy()\n df3.columns.names = ["exp", "animal", 0]\n tm.assert_frame_equal(\n df3.stack(level=["animal", 0], future_stack=future_stack),\n animal_hair_stacked,\n check_names=False,\n )\n\n @pytest.mark.filterwarnings(\n "ignore:The 
previous implementation of stack is deprecated"\n )\n def test_stack_int_level_names(self, future_stack):\n columns = MultiIndex.from_tuples(\n [\n ("A", "cat", "long"),\n ("B", "cat", "long"),\n ("A", "dog", "short"),\n ("B", "dog", "short"),\n ],\n names=["exp", "animal", "hair_length"],\n )\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 4)), columns=columns\n )\n\n exp_animal_stacked = df.stack(\n level=["exp", "animal"], future_stack=future_stack\n )\n animal_hair_stacked = df.stack(\n level=["animal", "hair_length"], future_stack=future_stack\n )\n exp_hair_stacked = df.stack(\n level=["exp", "hair_length"], future_stack=future_stack\n )\n\n df2 = df.copy()\n df2.columns.names = [0, 1, 2]\n tm.assert_frame_equal(\n df2.stack(level=[1, 2], future_stack=future_stack),\n animal_hair_stacked,\n check_names=False,\n )\n tm.assert_frame_equal(\n df2.stack(level=[0, 1], future_stack=future_stack),\n exp_animal_stacked,\n check_names=False,\n )\n tm.assert_frame_equal(\n df2.stack(level=[0, 2], future_stack=future_stack),\n exp_hair_stacked,\n check_names=False,\n )\n\n # Out-of-order int column names\n df3 = df.copy()\n df3.columns.names = [2, 0, 1]\n tm.assert_frame_equal(\n df3.stack(level=[0, 1], future_stack=future_stack),\n animal_hair_stacked,\n check_names=False,\n )\n tm.assert_frame_equal(\n df3.stack(level=[2, 0], future_stack=future_stack),\n exp_animal_stacked,\n check_names=False,\n )\n tm.assert_frame_equal(\n df3.stack(level=[2, 1], future_stack=future_stack),\n exp_hair_stacked,\n check_names=False,\n )\n\n def test_unstack_bool(self):\n df = DataFrame(\n [False, False],\n index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),\n columns=["col"],\n )\n rs = df.unstack()\n xp = DataFrame(\n np.array([[False, np.nan], [np.nan, False]], dtype=object),\n index=["a", "b"],\n columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),\n )\n tm.assert_frame_equal(rs, xp)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous 
implementation of stack is deprecated"\n )\n def test_unstack_level_binding(self, future_stack):\n # GH9856\n mi = MultiIndex(\n levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],\n names=["first", "second", "third"],\n )\n s = Series(0, index=mi)\n result = s.unstack([1, 2]).stack(0, future_stack=future_stack)\n\n expected_mi = MultiIndex(\n levels=[["foo", "bar"], ["one", "two"]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=["first", "second"],\n )\n\n expected = DataFrame(\n np.array(\n [[0, np.nan], [np.nan, 0], [0, np.nan], [np.nan, 0]], dtype=np.float64\n ),\n index=expected_mi,\n columns=Index(["b", "a"], name="third"),\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_to_series(self, float_frame):\n # check reversibility\n data = float_frame.unstack()\n\n assert isinstance(data, Series)\n undo = data.unstack().T\n tm.assert_frame_equal(undo, float_frame)\n\n # check NA handling\n data = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]})\n data.index = Index(["a", "b", "c"])\n result = data.unstack()\n\n midx = MultiIndex(\n levels=[["x", "y"], ["a", "b", "c"]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n )\n expected = Series([1, 2, np.nan, 3, 4, np.nan], index=midx)\n\n tm.assert_series_equal(result, expected)\n\n # check composability of unstack\n old_data = data.copy()\n for _ in range(4):\n data = data.unstack()\n tm.assert_frame_equal(old_data, data)\n\n def test_unstack_dtypes(self, using_infer_string):\n # GH 2929\n rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]\n\n df = DataFrame(rows, columns=list("ABCD"))\n result = df.dtypes\n expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))\n tm.assert_series_equal(result, expected)\n\n # single dtype\n df2 = df.set_index(["A", "B"])\n df3 = df2.unstack("B")\n result = df3.dtypes\n expected = Series(\n [np.dtype("int64")] * 4,\n index=MultiIndex.from_arrays(\n [["C", "C", "D", "D"], [1, 2, 1, 
2]], names=(None, "B")\n ),\n )\n tm.assert_series_equal(result, expected)\n\n # mixed\n df2 = df.set_index(["A", "B"])\n df2["C"] = 3.0\n df3 = df2.unstack("B")\n result = df3.dtypes\n expected = Series(\n [np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,\n index=MultiIndex.from_arrays(\n [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")\n ),\n )\n tm.assert_series_equal(result, expected)\n df2["D"] = "foo"\n df3 = df2.unstack("B")\n result = df3.dtypes\n dtype = (\n pd.StringDtype(na_value=np.nan)\n if using_infer_string\n else np.dtype("object")\n )\n expected = Series(\n [np.dtype("float64")] * 2 + [dtype] * 2,\n index=MultiIndex.from_arrays(\n [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")\n ),\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "c, d",\n (\n (np.zeros(5), np.zeros(5)),\n (np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),\n ),\n )\n def test_unstack_dtypes_mixed_date(self, c, d):\n # GH7405\n df = DataFrame(\n {\n "A": ["a"] * 5,\n "C": c,\n "D": d,\n "B": date_range("2012-01-01", periods=5),\n }\n )\n\n right = df.iloc[:3].copy(deep=True)\n\n df = df.set_index(["A", "B"])\n df["D"] = df["D"].astype("int64")\n\n left = df.iloc[:3].unstack(0)\n right = right.set_index(["A", "B"]).unstack(0)\n right[("D", "a")] = right[("D", "a")].astype("int64")\n\n assert left.shape == (3, 2)\n tm.assert_frame_equal(left, right)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_unstack_non_unique_index_names(self, future_stack):\n idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])\n df = DataFrame([1, 2], index=idx)\n msg = "The name c1 occurs multiple times, use a level number"\n with pytest.raises(ValueError, match=msg):\n df.unstack("c1")\n\n with pytest.raises(ValueError, match=msg):\n df.T.stack("c1", future_stack=future_stack)\n\n def test_unstack_unused_levels(self):\n # GH 17845: unused codes in index make unstack() 
cast int to float\n idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]\n df = DataFrame([[1, 0]] * 3, index=idx)\n\n result = df.unstack()\n exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])\n expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)\n tm.assert_frame_equal(result, expected)\n assert (result.columns.levels[1] == idx.levels[1]).all()\n\n # Unused items on both levels\n levels = [[0, 1, 7], [0, 1, 2, 3]]\n codes = [[0, 0, 1, 1], [0, 2, 0, 2]]\n idx = MultiIndex(levels, codes)\n block = np.arange(4).reshape(2, 2)\n df = DataFrame(np.concatenate([block, block + 4]), index=idx)\n result = df.unstack()\n expected = DataFrame(\n np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx\n )\n tm.assert_frame_equal(result, expected)\n assert (result.columns.levels[1] == idx.levels[1]).all()\n\n @pytest.mark.parametrize(\n "level, idces, col_level, idx_level",\n (\n (0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),\n (1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),\n ),\n )\n def test_unstack_unused_levels_mixed_with_nan(\n self, level, idces, col_level, idx_level\n ):\n # With mixed dtype and NaN\n levels = [["a", 2, "c"], [1, 3, 5, 7]]\n codes = [[0, -1, 1, 1], [0, 2, -1, 2]]\n idx = MultiIndex(levels, codes)\n data = np.arange(8)\n df = DataFrame(data.reshape(4, 2), index=idx)\n\n result = df.unstack(level=level)\n exp_data = np.zeros(18) * np.nan\n exp_data[idces] = data\n cols = MultiIndex.from_product([[0, 1], col_level])\n expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("cols", [["A", "C"], slice(None)])\n def test_unstack_unused_level(self, cols):\n # GH 18562 : unused codes on the unstacked level\n df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])\n\n ind = df.set_index(["A", "B", "C"], drop=False)\n selection = ind.loc[(slice(None), 
slice(None), "I"), cols]\n result = selection.unstack()\n\n expected = ind.iloc[[0]][cols]\n expected.columns = MultiIndex.from_product(\n [expected.columns, ["I"]], names=[None, "C"]\n )\n expected.index = expected.index.droplevel("C")\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_long_index(self):\n # PH 32624: Error when using a lot of indices to unstack.\n # The error occurred only, if a lot of indices are used.\n df = DataFrame(\n [[1]],\n columns=MultiIndex.from_tuples([[0]], names=["c1"]),\n index=MultiIndex.from_tuples(\n [[0, 0, 1, 0, 0, 0, 1]],\n names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],\n ),\n )\n result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])\n expected = DataFrame(\n [[1]],\n columns=MultiIndex.from_tuples(\n [[0, 0, 1, 0, 0, 0, 1]],\n names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],\n ),\n index=Index([0], name="i1"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_multi_level_cols(self):\n # PH 24729: Unstack a df with multi level columns\n df = DataFrame(\n [[0.0, 0.0], [0.0, 0.0]],\n columns=MultiIndex.from_tuples(\n [["B", "C"], ["B", "D"]], names=["c1", "c2"]\n ),\n index=MultiIndex.from_tuples(\n [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]\n ),\n )\n assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]\n\n def test_unstack_multi_level_rows_and_cols(self):\n # PH 28306: Unstack df with multi level cols and rows\n df = DataFrame(\n [[1, 2], [3, 4], [-1, -2], [-3, -4]],\n columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),\n index=MultiIndex.from_tuples(\n [\n ["m1", "P3", 222],\n ["m1", "A5", 111],\n ["m2", "P3", 222],\n ["m2", "A5", 111],\n ],\n names=["i1", "i2", "i3"],\n ),\n )\n result = df.unstack(["i3", "i2"])\n expected = df.unstack(["i3"]).unstack(["i2"])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("idx", [("jim", "joe"), ("joe", "jim")])\n @pytest.mark.parametrize("lev", list(range(2)))\n def 
test_unstack_nan_index1(self, idx, lev):\n # GH7466\n def cast(val):\n val_str = "" if val != val else val\n return f"{val_str:1}"\n\n df = DataFrame(\n {\n "jim": ["a", "b", np.nan, "d"],\n "joe": ["w", "x", "y", "z"],\n "jolie": ["a.w", "b.x", " .y", "d.z"],\n }\n )\n\n left = df.set_index(["jim", "joe"]).unstack()["jolie"]\n right = df.set_index(["joe", "jim"]).unstack()["jolie"].T\n tm.assert_frame_equal(left, right)\n\n mi = df.set_index(list(idx))\n udf = mi.unstack(level=lev)\n assert udf.notna().values.sum() == len(df)\n mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]\n rows, cols = udf["jolie"].notna().values.nonzero()\n for i, j in zip(rows, cols):\n left = sorted(udf["jolie"].iloc[i, j].split("."))\n right = mk_list(udf["jolie"].index[i]) + mk_list(udf["jolie"].columns[j])\n right = sorted(map(cast, right))\n assert left == right\n\n @pytest.mark.parametrize("idx", itertools.permutations(["1st", "2nd", "3rd"]))\n @pytest.mark.parametrize("lev", list(range(3)))\n @pytest.mark.parametrize("col", ["4th", "5th"])\n def test_unstack_nan_index_repeats(self, idx, lev, col):\n def cast(val):\n val_str = "" if val != val else val\n return f"{val_str:1}"\n\n df = DataFrame(\n {\n "1st": ["d"] * 3\n + [np.nan] * 5\n + ["a"] * 2\n + ["c"] * 3\n + ["e"] * 2\n + ["b"] * 5,\n "2nd": ["y"] * 2\n + ["w"] * 3\n + [np.nan] * 3\n + ["z"] * 4\n + [np.nan] * 3\n + ["x"] * 3\n + [np.nan] * 2,\n "3rd": [\n 67,\n 39,\n 53,\n 72,\n 57,\n 80,\n 31,\n 18,\n 11,\n 30,\n 59,\n 50,\n 62,\n 59,\n 76,\n 52,\n 14,\n 53,\n 60,\n 51,\n ],\n }\n )\n\n df["4th"], df["5th"] = (\n df.apply(lambda r: ".".join(map(cast, r)), axis=1),\n df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),\n )\n\n mi = df.set_index(list(idx))\n udf = mi.unstack(level=lev)\n assert udf.notna().values.sum() == 2 * len(df)\n mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]\n rows, cols = udf[col].notna().values.nonzero()\n for i, j in zip(rows, cols):\n left = 
sorted(udf[col].iloc[i, j].split("."))\n right = mk_list(udf[col].index[i]) + mk_list(udf[col].columns[j])\n right = sorted(map(cast, right))\n assert left == right\n\n def test_unstack_nan_index2(self):\n # GH7403\n df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})\n # Explicit cast to avoid implicit cast when setting to np.nan\n df = df.astype({"B": "float"})\n df.iloc[3, 1] = np.nan\n left = df.set_index(["A", "B"]).unstack(0)\n\n vals = [\n [3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],\n ]\n vals = list(map(list, zip(*vals)))\n idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")\n cols = MultiIndex(\n levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]\n )\n\n right = DataFrame(vals, columns=cols, index=idx)\n tm.assert_frame_equal(left, right)\n\n df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})\n # Explicit cast to avoid implicit cast when setting to np.nan\n df = df.astype({"B": "float"})\n df.iloc[2, 1] = np.nan\n left = df.set_index(["A", "B"]).unstack(0)\n\n vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]\n cols = MultiIndex(\n levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]\n )\n idx = Index([np.nan, 0, 1, 2, 3], name="B")\n right = DataFrame(vals, columns=cols, index=idx)\n tm.assert_frame_equal(left, right)\n\n df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})\n # Explicit cast to avoid implicit cast when setting to np.nan\n df = df.astype({"B": "float"})\n df.iloc[3, 1] = np.nan\n left = df.set_index(["A", "B"]).unstack(0)\n\n vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]\n cols = MultiIndex(\n levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]\n )\n idx = Index([np.nan, 0, 1, 2, 3], name="B")\n right = DataFrame(vals, columns=cols, index=idx)\n tm.assert_frame_equal(left, right)\n\n def test_unstack_nan_index3(self, using_array_manager):\n # 
GH7401\n df = DataFrame(\n {\n "A": list("aaaaabbbbb"),\n "B": (date_range("2012-01-01", periods=5).tolist() * 2),\n "C": np.arange(10),\n }\n )\n\n df.iloc[3, 1] = np.nan\n left = df.set_index(["A", "B"]).unstack()\n\n vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])\n idx = Index(["a", "b"], name="A")\n cols = MultiIndex(\n levels=[["C"], date_range("2012-01-01", periods=5)],\n codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],\n names=[None, "B"],\n )\n\n right = DataFrame(vals, columns=cols, index=idx)\n if using_array_manager:\n # INFO(ArrayManager) with ArrayManager preserve dtype where possible\n cols = right.columns[[1, 2, 3, 5]]\n right[cols] = right[cols].astype(df["C"].dtype)\n tm.assert_frame_equal(left, right)\n\n def test_unstack_nan_index4(self):\n # GH4862\n vals = [\n ["Hg", np.nan, np.nan, 680585148],\n ["U", 0.0, np.nan, 680585148],\n ["Pb", 7.07e-06, np.nan, 680585148],\n ["Sn", 2.3614e-05, 0.0133, 680607017],\n ["Ag", 0.0, 0.0133, 680607017],\n ["Hg", -0.00015, 0.0133, 680607017],\n ]\n df = DataFrame(\n vals,\n columns=["agent", "change", "dosage", "s_id"],\n index=[17263, 17264, 17265, 17266, 17267, 17268],\n )\n\n left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()\n\n vals = [\n [np.nan, np.nan, 7.07e-06, np.nan, 0.0],\n [0.0, -0.00015, np.nan, 2.3614e-05, np.nan],\n ]\n\n idx = MultiIndex(\n levels=[[680585148, 680607017], [0.0133]],\n codes=[[0, 1], [-1, 0]],\n names=["s_id", "dosage"],\n )\n\n cols = MultiIndex(\n levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],\n codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],\n names=[None, "agent"],\n )\n\n right = DataFrame(vals, columns=cols, index=idx)\n tm.assert_frame_equal(left, right)\n\n left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])\n tm.assert_frame_equal(left.unstack(), right)\n\n def test_unstack_nan_index5(self):\n # GH9497 - multiple unstack with nulls\n df = DataFrame(\n {\n "1st": [1, 2, 1, 2, 1, 2],\n "2nd": date_range("2014-02-01", 
periods=6, freq="D"),
                "jim": 100 + np.arange(6),
                "joe": (np.random.default_rng(2).standard_normal(6) * 10).round(2),
            }
        )

        df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
        df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
        df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan

        left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
        # every input value (2 value columns * len(df) rows) must survive the unstack
        assert left.notna().values.sum() == 2 * len(df)

        for col in ["jim", "joe"]:
            for _, r in df.iterrows():
                key = r["1st"], (col, r["2nd"], r["3rd"])
                assert r[col] == left.loc[key]

    def test_stack_datetime_column_multiIndex(self, future_stack):
        # GH 8039: stacking when a MultiIndex column level holds a datetime value
        t = datetime(2014, 1, 1)
        df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
        warn = None if future_stack else FutureWarning
        msg = "The previous implementation of stack is deprecated"
        with tm.assert_produces_warning(warn, match=msg):
            result = df.stack(future_stack=future_stack)

        eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
        ecols = MultiIndex.from_tuples([(t, "A")])
        expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.parametrize(
        "multiindex_columns",
        [
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3],
            [0, 1, 2, 4],
            [0, 1, 2],
            [1, 2, 3],
            [2, 3, 4],
            [0, 1],
            [0, 2],
            [0, 3],
            [0],
            [2],
            [4],
            [4, 3, 2, 1, 0],
            [3, 2, 1, 0],
            [4, 2, 1, 0],
            [2, 1, 0],
            [3, 2, 1],
            [4, 3, 2],
            [1, 0],
            [2, 0],
            [3, 0],
        ],
    )
    @pytest.mark.parametrize("level", (-1, 0, 1, [0, 1], [1, 0]))
    def test_stack_partial_multiIndex(self, multiindex_columns, level, future_stack):
        # GH 8844: stack a frame whose columns are an arbitrary subset/reordering
        # of a full MultiIndex, so some level values are unobserved
        dropna = False if not future_stack else lib.no_default
        full_multiindex = MultiIndex.from_tuples(
            [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
            names=["Upper", "Lower"],
        )
        multiindex = full_multiindex[multiindex_columns]
        df = DataFrame(
            np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
            columns=multiindex,
        )
        result = df.stack(level=level, dropna=dropna, future_stack=future_stack)

        if isinstance(level, int) and not future_stack:
            # Stacking a single level should not make any all-NaN rows,
            # so df.stack(level=level, dropna=False) should be the same
            # as df.stack(level=level, dropna=True).
            expected = df.stack(level=level, dropna=True, future_stack=future_stack)
            if isinstance(expected, Series):
                tm.assert_series_equal(result, expected)
            else:
                tm.assert_frame_equal(result, expected)

        # rebuilding the columns from tuples must not change the stack result
        df.columns = MultiIndex.from_tuples(
            df.columns.to_numpy(), names=df.columns.names
        )
        expected = df.stack(level=level, dropna=dropna, future_stack=future_stack)
        if isinstance(expected, Series):
            tm.assert_series_equal(result, expected)
        else:
            tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_full_multiIndex(self, future_stack):
        # GH 8844: stacking a subset of a full MultiIndex keeps all level values
        # ("u"/"y" stay in the level even though only columns 0, 1, 3 are used)
        full_multiindex = MultiIndex.from_tuples(
            [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
            names=["Upper", "Lower"],
        )
        df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
        dropna = False if not future_stack else lib.no_default
        result = df.stack(dropna=dropna, future_stack=future_stack)
        expected = DataFrame(
            [[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
            index=MultiIndex(
                levels=[[0, 1], ["u", "x", "y", "z"]],
                codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
                names=[None, "Lower"],
            ),
            columns=Index(["B", "C"], name="Upper"),
        )
        # "B" has no NaNs, so it keeps the original integer dtype
        expected["B"] = expected["B"].astype(df.dtypes.iloc[0])
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("ordered", [False, True])
    def test_stack_preserve_categorical_dtype(self, ordered, future_stack):
        # GH13854: stacking a CategoricalIndex of columns keeps the categorical dtype
        cidx = pd.CategoricalIndex(list("yxz"), categories=list("xyz"), ordered=ordered)
        df = DataFrame([[10, 11, 12]], columns=cidx)
        result = df.stack(future_stack=future_stack)

        # `MultiIndex.from_product` preserves categorical dtype -
        # it's tested elsewhere.
        midx = MultiIndex.from_product([df.index, cidx])
        expected = Series([10, 11, 12], index=midx)

        tm.assert_series_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.parametrize("ordered", [False, True])
    @pytest.mark.parametrize(
        "labels,data",
        [
            (list("xyz"), [10, 11, 12, 13, 14, 15]),
            (list("zyx"), [14, 15, 12, 13, 10, 11]),
        ],
    )
    def test_stack_multi_preserve_categorical_dtype(
        self, ordered, labels, data, future_stack
    ):
        # GH-36991: stacking both levels of categorical MultiIndex columns
        cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
        cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
        midx = MultiIndex.from_product([cidx, cidx2])
        df = DataFrame([sorted(data)], columns=midx)
        result = df.stack([0, 1], future_stack=future_stack)

        # future_stack keeps the original label order; legacy stack sorts it
        labels = labels if future_stack else sorted(labels)
        s_cidx = pd.CategoricalIndex(labels, ordered=ordered)
        expected_data = sorted(data) if future_stack else data
        expected = Series(
            expected_data, index=MultiIndex.from_product([[0], s_cidx, cidx2])
        )

        tm.assert_series_equal(result, expected)

    def test_stack_preserve_categorical_dtype_values(self, future_stack):
        # GH-23077: stacking Categorical-valued columns keeps the categorical dtype
        cat = pd.Categorical(["a", "a", "b", "c"])
        df = DataFrame({"A": cat, "B": cat})
        result = df.stack(future_stack=future_stack)
        index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
        expected = Series(
            pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
        )
        tm.assert_series_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")
    @pytest.mark.parametrize(
        "index, columns",
        [
            ([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
            ([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
            ([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
        ],
    )
    def test_stack_multi_columns_non_unique_index(self, index, columns, future_stack):
        # GH-28301: stacking MultiIndex columns with a non-unique row index must
        # produce index codes consistent with an index rebuilt from tuples

        df = DataFrame(index=index, columns=columns).fillna(1)
        stacked = df.stack(future_stack=future_stack)
        new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
        expected = DataFrame(
            stacked.to_numpy(), index=new_index, columns=stacked.columns
        )
        tm.assert_frame_equal(stacked, expected)
        stacked_codes = np.asarray(stacked.index.codes)
        expected_codes = np.asarray(new_index.codes)
        tm.assert_numpy_array_equal(stacked_codes, expected_codes)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.parametrize(
        "vals1, vals2, dtype1, dtype2, expected_dtype",
        [
            ([1, 2], [3.0, 4.0], "Int64", "Float64", "Float64"),
            ([1, 2], ["foo", "bar"], "Int64", "string", "object"),
        ],
    )
    def test_stack_multi_columns_mixed_extension_types(
        self, vals1, vals2, dtype1, dtype2, expected_dtype, future_stack
    ):
        # GH45740: stacking columns with different extension dtypes should match
        # the object-dtype path cast to the common expected dtype
        df = DataFrame(
            {
                ("A", 1): Series(vals1, dtype=dtype1),
                ("A", 2): Series(vals2, dtype=dtype2),
            }
        )
        result = df.stack(future_stack=future_stack)
        expected = (
            df.astype(object).stack(future_stack=future_stack).astype(expected_dtype)
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("level", [0, 1])
    def test_unstack_mixed_extension_types(self, level):
        # unstack a frame holding Int64 and Categorical columns; the expected
        # values are built through the object-dtype path
        index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
        df = DataFrame(
            {
                "A": pd.array([0, 1, None], dtype="Int64"),
                "B": pd.Categorical(["a", "a", "b"]),
            },
            index=index,
        )

        result = df.unstack(level=level)
        expected = df.astype(object).unstack(level=level)
        # fill holes with pd.NA to mirror the extension-dtype missing marker
        if level == 0:
            expected[("A", "B")] = expected[("A", "B")].fillna(pd.NA)
        else:
            expected[("A", 0)] = 
expected[("A", 0)].fillna(pd.NA)

        # dtypes (Int64 / Categorical) must survive the unstack unchanged
        expected_dtypes = Series(
            [df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
        )
        tm.assert_series_equal(result.dtypes, expected_dtypes)
        tm.assert_frame_equal(result.astype(object), expected)

    @pytest.mark.parametrize("level", [0, "baz"])
    def test_unstack_swaplevel_sortlevel(self, level):
        # GH 20994: sort_index after unstack+swaplevel works by level number
        # or by level name alike
        mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
        df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
        df.columns.name = "foo"

        expected = DataFrame(
            [[3, 1, 2, 0]],
            columns=MultiIndex.from_tuples(
                [("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
            ),
        )
        expected.index.name = "bar"

        result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
        tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_unstack_sort_false(frame_or_series, dtype):
    # GH 15105: unstack(sort=False) keeps the first-seen label order
    index = MultiIndex.from_tuples(
        [("two", "z", "b"), ("two", "y", "a"), ("one", "z", "b"), ("one", "y", "a")]
    )
    obj = frame_or_series(np.arange(1.0, 5.0), index=index, dtype=dtype)
    result = obj.unstack(level=-1, sort=False)

    if frame_or_series is DataFrame:
        expected_columns = MultiIndex.from_tuples([(0, "b"), (0, "a")])
    else:
        expected_columns = ["b", "a"]
    expected = DataFrame(
        [[1.0, np.nan], [np.nan, 2.0], [3.0, np.nan], [np.nan, 4.0]],
        columns=expected_columns,
        index=MultiIndex.from_tuples(
            [("two", "z"), ("two", "y"), ("one", "z"), ("one", "y")]
        ),
        dtype=dtype,
    )
    tm.assert_frame_equal(result, expected)

    # same contract when unstacking multiple levels at once
    result = obj.unstack(level=[1, 2], sort=False)

    if frame_or_series is DataFrame:
        expected_columns = MultiIndex.from_tuples([(0, "z", "b"), (0, "y", "a")])
    else:
        expected_columns = MultiIndex.from_tuples([("z", "b"), ("y", "a")])
    expected = DataFrame(
        [[1.0, 2.0], [3.0, 4.0]],
        index=["two", "one"],
        columns=expected_columns,
        dtype=dtype,
    )
    tm.assert_frame_equal(result, expected)


def test_unstack_fill_frame_object():
    # GH12815 Test unstacking with object.
    data = Series(["a", "b", "c", "a"], dtype="object")
    data.index = MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
    )

    # By default missing values will be NaN
    result = data.unstack()
    expected = DataFrame(
        {"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]},
        index=list("xyz"),
        dtype=object,
    )
    tm.assert_frame_equal(result, expected)

    # Fill with any value replaces missing values as expected
    result = data.unstack(fill_value="d")
    expected = DataFrame(
        {"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz"), dtype=object
    )
    tm.assert_frame_equal(result, expected)


def test_unstack_timezone_aware_values():
    # GH 18338: tz-aware timestamps must survive the unstack
    df = DataFrame(
        {
            "timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
            "a": ["a"],
            "b": ["b"],
            "c": ["c"],
        },
        columns=["timestamp", "a", "b", "c"],
    )
    result = df.set_index(["a", "b"]).unstack()
    expected = DataFrame(
        [[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
        index=Index(["a"], name="a"),
        columns=MultiIndex(
            levels=[["timestamp", "c"], ["b"]],
            codes=[[0, 1], [0, 0]],
            names=[None, "b"],
        ),
    )
    tm.assert_frame_equal(result, expected)


def test_stack_timezone_aware_values(future_stack):
    # GH 19420: stacking a tz-aware column keeps the tz-aware dtype
    ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
    df = DataFrame({"A": ts}, index=["a", "b", "c"])
    result = df.stack(future_stack=future_stack)
    expected = Series(
        ts,
        index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
    )
    tm.assert_series_equal(result, expected)


@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
@pytest.mark.parametrize("dropna", [True, False, lib.no_default])
def test_stack_empty_frame(dropna, future_stack):
    # GH 36113: stacking an empty frame yields an empty float64 Series;
    # future_stack rejects an explicit dropna
    levels = [np.array([], dtype=np.int64), np.array([], dtype=np.int64)]
    expected = Series(dtype=np.float64, index=MultiIndex(levels=levels, codes=[[], []]))
    if future_stack and dropna is not lib.no_default:
        with pytest.raises(ValueError, match="dropna must be unspecified"):
            DataFrame(dtype=np.float64).stack(dropna=dropna, future_stack=future_stack)
    else:
        result = DataFrame(dtype=np.float64).stack(
            dropna=dropna, future_stack=future_stack
        )
        tm.assert_series_equal(result, expected)


@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
@pytest.mark.parametrize("dropna", [True, False, lib.no_default])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value, future_stack):
    # GH 36113: stack followed by unstack on an empty frame round-trips
    if future_stack and dropna is not lib.no_default:
        with pytest.raises(ValueError, match="dropna must be unspecified"):
            DataFrame(dtype=np.int64).stack(
                dropna=dropna, future_stack=future_stack
            ).unstack(fill_value=fill_value)
    else:
        result = (
            DataFrame(dtype=np.int64)
            .stack(dropna=dropna, future_stack=future_stack)
            .unstack(fill_value=fill_value)
        )
        expected = DataFrame(dtype=np.int64)
        tm.assert_frame_equal(result, expected)


def test_unstack_single_index_series():
    # GH 36113: unstacking a non-MultiIndex Series must raise
    msg = r"index must be a MultiIndex to unstack.*"
    with pytest.raises(ValueError, match=msg):
        Series(dtype=np.int64).unstack()


def test_unstacking_multi_index_df():
    # see gh-30740: unstacking several levels at once matches unstacking
    # them one at a time
    df = DataFrame(
        {
            "name": ["Alice", "Bob"],
            "score": [9.5, 8],
            "employed": [False, True],
            "kids": [0, 0],
            "gender": ["female", "male"],
        }
    )
    df = df.set_index(["name", "employed", "kids", "gender"])
    df = df.unstack(["gender"], fill_value=0)
    # NOTE(review): this first `expected` is immediately overwritten below;
    # only the literal frame constructed next is actually compared
    expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
    result = df.unstack(["employed", "kids"], fill_value=0)
    expected = DataFrame(
        [[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
        index=Index(["Alice", "Bob"], name="name"),
        columns=MultiIndex.from_tuples(
            [
                ("score", "female", False, 0),
                ("score", "female", True, 0),
                ("score", "male", False, 0),
                ("score", "male", True, 0),
            ],
            names=[None, "gender", "employed", "kids"],
        ),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_positional_level_duplicate_column_names(future_stack):
    # https://github.com/pandas-dev/pandas/issues/36353
    # both column levels share the name "a"; stacking by position must still work
    columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
    df = DataFrame([[1, 1, 1, 1]], columns=columns)
    result = df.stack(0, future_stack=future_stack)

    new_columns = Index(["y", "z"], name="a")
    new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
    expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)

    tm.assert_frame_equal(result, expected)


def test_unstack_non_slice_like_blocks(using_array_manager):
    # Case where the mgr_locs of a DataFrame's underlying blocks are not slice-like

    mi = MultiIndex.from_product([range(5), ["A", "B", "C"]])
    df = DataFrame(
        {
            0: np.random.default_rng(2).standard_normal(15),
            1: np.random.default_rng(2).standard_normal(15).astype(np.int64),
            2: np.random.default_rng(2).standard_normal(15),
            3: np.random.default_rng(2).standard_normal(15),
        },
        index=mi,
    )
    if not using_array_manager:
        # precondition: the mixed dtypes force non-slice-like block placement
        assert any(not x.mgr_locs.is_slice_like for x in df._mgr.blocks)

    res = df.unstack()

    expected = pd.concat([df[n].unstack() for n in range(4)], keys=range(4), axis=1)
    tm.assert_frame_equal(res, expected)


@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_sort_false(future_stack):
    # GH 15105: stack(sort=False) keeps first-seen order of column labels
    data = [[1, 2, 3.0, 4.0], [2, 3, 4.0, 5.0], [3, 4, np.nan, np.nan]]
    df = DataFrame(
        data,
        columns=MultiIndex(
            levels=[["B", "A"], ["x", "y"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
        ),
    )
    kwargs = {} if 
future_stack else {"sort": False}
    result = df.stack(level=0, future_stack=future_stack, **kwargs)
    if future_stack:
        # future_stack keeps NA rows, so each row gains a trailing NaN pair
        expected = DataFrame(
            {
                "x": [1.0, 3.0, 2.0, 4.0, 3.0, np.nan],
                "y": [2.0, 4.0, 3.0, 5.0, 4.0, np.nan],
            },
            index=MultiIndex.from_arrays(
                [[0, 0, 1, 1, 2, 2], ["B", "A", "B", "A", "B", "A"]]
            ),
        )
    else:
        expected = DataFrame(
            {"x": [1.0, 3.0, 2.0, 4.0, 3.0], "y": [2.0, 4.0, 3.0, 5.0, 4.0]},
            index=MultiIndex.from_arrays([[0, 0, 1, 1, 2], ["B", "A", "B", "A", "B"]]),
        )
    tm.assert_frame_equal(result, expected)

    # Codes sorted in this call
    df = DataFrame(
        data,
        columns=MultiIndex.from_arrays([["B", "B", "A", "A"], ["x", "y", "x", "y"]]),
    )
    kwargs = {} if future_stack else {"sort": False}
    result = df.stack(level=0, future_stack=future_stack, **kwargs)
    tm.assert_frame_equal(result, expected)


@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_sort_false_multi_level(future_stack):
    # GH 15105: sort=False also preserves order when stacking both levels
    idx = MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
    df = DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=idx)
    kwargs = {} if future_stack else {"sort": False}
    result = df.stack([0, 1], future_stack=future_stack, **kwargs)
    expected_index = MultiIndex.from_tuples(
        [
            ("cat", "weight", "kg"),
            ("cat", "height", "m"),
            ("dog", "weight", "kg"),
            ("dog", "height", "m"),
        ]
    )
    expected = Series([1.0, 2.0, 3.0, 4.0], index=expected_index)
    tm.assert_series_equal(result, expected)


class TestStackUnstackMultiLevel:
    def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
        # just check that it works for now
        ymd = multiindex_year_month_day_dataframe_random_data

        unstacked = ymd.unstack()
        unstacked.unstack()

        # test that ints work
        ymd.astype(int).unstack()

        # test that int32 work
        ymd.astype(np.int32).unstack()

    @pytest.mark.parametrize(
        "result_rows,result_columns,index_product,expected_row",
        [
            (
                [[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
                ["ix1", "ix2", "col1", "col2", "col3", "col4"],
                2,
                [None, None, 30.0, None],
            ),
            (
                [[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
                ["ix1", "ix2", "col1", "col2", "col3"],
                2,
                [None, None, 30.0],
            ),
            (
                [[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
                ["ix1", "ix2", "col1", "col2", "col3"],
                None,
                [None, None, 30.0],
            ),
        ],
    )
    def test_unstack_partial(
        self, result_rows, result_columns, index_product, expected_row
    ):
        # check for regressions on this issue:
        # https://github.com/pandas-dev/pandas/issues/19351
        # make sure DataFrame.unstack() works when its run on a subset of the DataFrame
        # and the Index levels contain values that are not present in the subset
        result = DataFrame(result_rows, columns=result_columns).set_index(
            ["ix1", "ix2"]
        )
        result = result.iloc[1:2].unstack("ix2")
        expected = DataFrame(
            [expected_row],
            columns=MultiIndex.from_product(
                [result_columns[2:], [index_product]], names=[None, "ix2"]
            ),
            index=Index([2], name="ix1"),
        )
        tm.assert_frame_equal(result, expected)

    def test_unstack_multiple_no_empty_columns(self):
        # unstacking two levels at once should not introduce all-NaN columns
        index = MultiIndex.from_tuples(
            [(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
        )

        s = Series(np.random.default_rng(2).standard_normal(4), index=index)

        unstacked = s.unstack([1, 2])
        expected = unstacked.dropna(axis=1, how="all")
        tm.assert_frame_equal(unstacked, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack(self, multiindex_year_month_day_dataframe_random_data, future_stack):
        # stack/unstack round-trips on a 3-level (year, month, day) index
        ymd = multiindex_year_month_day_dataframe_random_data

        # regular roundtrip
        unstacked = ymd.unstack()
        restacked = unstacked.stack(future_stack=future_stack)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            restacked = restacked.dropna(how="all")
        tm.assert_frame_equal(restacked, ymd)

        unlexsorted = ymd.sort_index(level=2)

        unstacked = unlexsorted.unstack(2)
        restacked = unstacked.stack(future_stack=future_stack)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            restacked = restacked.dropna(how="all")
        tm.assert_frame_equal(restacked.sort_index(level=0), ymd)

        unlexsorted = unlexsorted[::-1]
        unstacked = unlexsorted.unstack(1)
        restacked = unstacked.stack(future_stack=future_stack).swaplevel(1, 2)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            restacked = restacked.dropna(how="all")
        tm.assert_frame_equal(restacked.sort_index(level=0), ymd)

        unlexsorted = unlexsorted.swaplevel(0, 1)
        unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
        restacked = unstacked.stack(0, future_stack=future_stack).swaplevel(1, 2)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            restacked = restacked.dropna(how="all")
        tm.assert_frame_equal(restacked.sort_index(level=0), ymd)

        # columns unsorted
        unstacked = ymd.unstack()
        restacked = unstacked.stack(future_stack=future_stack)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            restacked = restacked.dropna(how="all")
        tm.assert_frame_equal(restacked, ymd)

        # more than 2 levels in the columns
        unstacked = ymd.unstack(1).unstack(1)

        result = unstacked.stack(1, future_stack=future_stack)
        expected = ymd.unstack()
        tm.assert_frame_equal(result, expected)

        result = unstacked.stack(2, future_stack=future_stack)
        expected = ymd.unstack(1)
        tm.assert_frame_equal(result, expected)

        result = unstacked.stack(0, future_stack=future_stack)
        expected = ymd.stack(future_stack=future_stack).unstack(1).unstack(1)
        tm.assert_frame_equal(result, expected)

        # not all levels present in each echelon
        unstacked = ymd.unstack(2).loc[:, ::3]
        stacked = unstacked.stack(future_stack=future_stack).stack(
            future_stack=future_stack
        )
        ymd_stacked = ymd.stack(future_stack=future_stack)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            stacked = stacked.dropna(how="all")
            ymd_stacked = ymd_stacked.dropna(how="all")
        tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))

        # stack with negative number
        result = ymd.unstack(0).stack(-2, future_stack=future_stack)
        expected = ymd.unstack(0).stack(0, future_stack=future_stack)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "idx, columns, exp_idx",
        [
            [
                list("abab"),
                ["1st", "2nd", "1st"],
                MultiIndex(
                    levels=[["a", "b"], ["1st", "2nd"]],
                    codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
                ),
            ],
            [
                MultiIndex.from_tuples((("a", 2), ("b", 1), ("a", 1), ("b", 2))),
                ["1st", "2nd", "1st"],
                MultiIndex(
                    levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
                    codes=[
                        np.tile(np.arange(2).repeat(3), 2),
                        np.repeat([1, 0, 1], [3, 6, 3]),
                        np.tile([0, 1, 0], 4),
                    ],
                ),
            ],
        ],
    )
    def test_stack_duplicate_index(self, idx, columns, exp_idx, future_stack):
        # GH10417: duplicate column labels — legacy stack supports them,
        # future_stack raises
        df = DataFrame(
            np.arange(12).reshape(4, 3),
            index=idx,
            columns=columns,
        )
        if future_stack:
            msg = "Columns with duplicate values are not supported in stack"
            with pytest.raises(ValueError, match=msg):
                df.stack(future_stack=future_stack)
        else:
            result = df.stack(future_stack=future_stack)
            expected = Series(np.arange(12), index=exp_idx)
            tm.assert_series_equal(result, expected)
            assert result.index.is_unique is False
            li, ri = result.index, expected.index
            tm.assert_index_equal(li, ri)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_unstack_odd_failure(self, future_stack):
        mi = MultiIndex.from_arrays(
            [
                ["Fri"] * 4 + ["Sat"] * 2 + ["Sun"] * 2 + ["Thu"] * 3,
                ["Dinner"] * 2 + ["Lunch"] * 2 + ["Dinner"] * 5 + 
["Lunch"] * 2,
                ["No", "Yes"] * 4 + ["No", "No", "Yes"],
            ],
            names=["day", "time", "smoker"],
        )
        df = DataFrame(
            {
                "sum": np.arange(11, dtype="float64"),
                "len": np.arange(11, dtype="float64"),
            },
            index=mi,
        )
        # it works, #2100
        result = df.unstack(2)

        recons = result.stack(future_stack=future_stack)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            recons = recons.dropna(how="all")
        tm.assert_frame_equal(recons, df)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_mixed_dtype(self, multiindex_dataframe_random_data, future_stack):
        # stacking a frame mixing an object column ("foo", "four") with float
        # columns: the float column "bar" must keep float64 after the stack
        frame = multiindex_dataframe_random_data

        df = frame.T
        df["foo", "four"] = "foo"
        df = df.sort_index(level=1, axis=1)

        stacked = df.stack(future_stack=future_stack)
        result = df["foo"].stack(future_stack=future_stack).sort_index()
        tm.assert_series_equal(stacked["foo"], result, check_names=False)
        assert result.name is None
        assert stacked["bar"].dtype == np.float64

    def test_unstack_bug(self, future_stack):
        # groupby result round-trips through unstack/stack
        df = DataFrame(
            {
                "state": ["naive", "naive", "naive", "active", "active", "active"],
                "exp": ["a", "b", "b", "b", "a", "a"],
                "barcode": [1, 2, 3, 4, 1, 3],
                "v": ["hi", "hi", "bye", "bye", "bye", "peace"],
                "extra": np.arange(6.0),
            }
        )

        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)

        unstacked = result.unstack()
        restacked = unstacked.stack(future_stack=future_stack)
        tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_unstack_preserve_names(
        self, multiindex_dataframe_random_data, future_stack
    ):
        # index/column level names survive an unstack followed by a stack
        frame = multiindex_dataframe_random_data

        unstacked = frame.unstack()
        assert unstacked.index.name == "first"
        assert unstacked.columns.names == ["exp", "second"]

        restacked = unstacked.stack(future_stack=future_stack)
        assert restacked.index.names == frame.index.names

    @pytest.mark.parametrize("method", ["stack", "unstack"])
    def test_stack_unstack_wrong_level_name(
        self, method, multiindex_dataframe_random_data, future_stack
    ):
        # GH 18303 - wrong level name should raise
        frame = multiindex_dataframe_random_data

        # A DataFrame with flat axes:
        df = frame.loc["foo"]

        kwargs = {"future_stack": future_stack} if method == "stack" else {}
        with pytest.raises(KeyError, match="does not match index name"):
            getattr(df, method)("mistake", **kwargs)

        if method == "unstack":
            # Same on a Series:
            s = df.iloc[:, 0]
            with pytest.raises(KeyError, match="does not match index name"):
                getattr(s, method)("mistake", **kwargs)

    def test_unstack_level_name(self, multiindex_dataframe_random_data):
        # unstacking by level name matches unstacking by level number
        frame = multiindex_dataframe_random_data

        result = frame.unstack("second")
        expected = frame.unstack(level=1)
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_level_name(self, multiindex_dataframe_random_data, future_stack):
        # stacking by level name matches stacking by level number
        frame = multiindex_dataframe_random_data

        unstacked = frame.unstack("second")
        result = unstacked.stack("exp", future_stack=future_stack)
        expected = frame.unstack().stack(0, future_stack=future_stack)
        tm.assert_frame_equal(result, expected)

        result = frame.stack("exp", future_stack=future_stack)
        expected = frame.stack(future_stack=future_stack)
        tm.assert_series_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_unstack_multiple(
        self, multiindex_year_month_day_dataframe_random_data, future_stack
    ):
        # unstacking/stacking several levels at once matches doing it level by level
        ymd = multiindex_year_month_day_dataframe_random_data

        unstacked = ymd.unstack(["year", "month"])
        expected = ymd.unstack("year").unstack("month")
        tm.assert_frame_equal(unstacked, expected)
        assert unstacked.columns.names == expected.columns.names

        # series
        s = ymd["A"]
        s_unstacked = s.unstack(["year", "month"])
        tm.assert_frame_equal(s_unstacked, expected["A"])

        restacked = unstacked.stack(["year", "month"], future_stack=future_stack)
        if future_stack:
            # NA values in unstacked persist to restacked in version 3
            restacked = restacked.dropna(how="all")
        restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
        restacked = restacked.sort_index(level=0)

        tm.assert_frame_equal(restacked, ymd)
        assert restacked.index.names == ymd.index.names

        # GH #451
        unstacked = ymd.unstack([1, 2])
        expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all")
        tm.assert_frame_equal(unstacked, expected)

        unstacked = ymd.unstack([2, 1])
        expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all")
        tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_names_and_numbers(
        self, multiindex_year_month_day_dataframe_random_data, future_stack
    ):
        ymd = multiindex_year_month_day_dataframe_random_data

        unstacked = ymd.unstack(["year", "month"])

        # Can't use mixture of names and numbers to stack
        with pytest.raises(ValueError, match="level should contain"):
            unstacked.stack([0, "month"], future_stack=future_stack)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_multiple_out_of_bounds(
        self, multiindex_year_month_day_dataframe_random_data, future_stack
    ):
        # nlevels == 3
        ymd = multiindex_year_month_day_dataframe_random_data

        unstacked = ymd.unstack(["year", "month"])

        with pytest.raises(IndexError, match="Too many levels"):
            unstacked.stack([2, 3], future_stack=future_stack)
        with pytest.raises(IndexError, match="not a valid level number"):
            unstacked.stack([-4, -3], future_stack=future_stack)

    def test_unstack_period_series(self):
        # GH4342: unstacking a Series with a PeriodIndex level
        idx1 = pd.PeriodIndex(
            ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
            freq="M",
            name="period",
        )
        idx2 = Index(["A", "B"] * 3, name="str")
        value = [1, 2, 3, 4, 5, 6]

        idx = MultiIndex.from_arrays([idx1, idx2])
        s = Series(value, index=idx)

        result1 = s.unstack()
        result2 = s.unstack(level=1)
        result3 = s.unstack(level=0)

        e_idx = pd.PeriodIndex(
            ["2013-01", "2013-02", "2013-03"], freq="M", name="period"
        )
        expected = DataFrame(
            {"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"]
        )
        expected.columns.name = "str"

        tm.assert_frame_equal(result1, expected)
        tm.assert_frame_equal(result2, expected)
        tm.assert_frame_equal(result3, expected.T)

        # both index levels are PeriodIndex
        idx1 = pd.PeriodIndex(
            ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
            freq="M",
            name="period1",
        )

        idx2 = pd.PeriodIndex(
            ["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"],
            freq="M",
            name="period2",
        )
        idx = MultiIndex.from_arrays([idx1, idx2])
        s = Series(value, index=idx)

        result1 = s.unstack()
        result2 = s.unstack(level=1)
        result3 = s.unstack(level=0)

        e_idx = pd.PeriodIndex(
            ["2013-01", "2013-02", "2013-03"], freq="M", name="period1"
        )
        e_cols = pd.PeriodIndex(
            ["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"],
            freq="M",
            name="period2",
        )
        expected = DataFrame(
            [
                [np.nan, np.nan, np.nan, np.nan, 2, 1],
                [np.nan, np.nan, 4, 3, np.nan, np.nan],
                [6, 5, np.nan, np.nan, np.nan, np.nan],
            ],
            index=e_idx,
            columns=e_cols,
        )

        tm.assert_frame_equal(result1, expected)
        tm.assert_frame_equal(result2, expected)
        tm.assert_frame_equal(result3, expected.T)

    def test_unstack_period_frame(self):
        # GH4342
        idx1 = pd.PeriodIndex(
            ["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"],
            freq="M",
            name="period1",
        )
        idx2 = pd.PeriodIndex(
            ["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"],
            freq="M",
            name="period2",
        )
        value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]}
        idx = MultiIndex.from_arrays([idx1, idx2])
        df = DataFrame(value, index=idx)

        result1 = df.unstack()
        result2 = df.unstack(level=1)
        result3 = df.unstack(level=0)

        e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1")
        e_2 = pd.PeriodIndex(
            ["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"],
            freq="M",
            name="period2",
        )
        e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2])
        expected = DataFrame(
            [[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols
        )

        tm.assert_frame_equal(result1, expected)
        tm.assert_frame_equal(result2, expected)

        e_1 = pd.PeriodIndex(
            ["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1"
        )
        e_2 = pd.PeriodIndex(
            ["2013-10", "2013-12", "2014-02"], freq="M", name="period2"
        )
        e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1])
        expected = DataFrame(
            [[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols
        )

        tm.assert_frame_equal(result3, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_multiple_bug(self, future_stack, using_infer_string):
        # bug when some uniques are not present in the data GH#3170
        id_col = ([1] * 3) + ([2] * 3)
        name = (["a"] * 3) + (["b"] * 3)
        date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2)
        var1 = np.random.default_rng(2).integers(0, 100, 6)
        df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1})

        multi = df.set_index(["DATE", "ID"])
        multi.columns.name = "Params"
        unst = multi.unstack("ID")
        # mean over a string column must raise; message depends on string mode
        msg = re.escape("agg function failed [how->mean,dtype->")
        if using_infer_string:
            msg = "dtype 'str' does not support operation 'mean'"
        with pytest.raises(TypeError, match=msg):
            unst.resample("W-THU").mean()
        down = unst.resample("W-THU").mean(numeric_only=True)
        rs = down.stack("ID", future_stack=future_stack)
        xp = (
            unst.loc[:, ["VAR1"]]
            .resample("W-THU")
            .mean()
            .stack("ID", future_stack=future_stack)
        )
        xp.columns.name = "Params"
        tm.assert_frame_equal(rs, xp)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_dropna(self, future_stack):
        # GH#3997: legacy stack honors dropna; future_stack forbids passing it
        df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]})
        df = df.set_index(["A", "B"])

        dropna = False if not future_stack else lib.no_default
        stacked = df.unstack().stack(dropna=dropna, future_stack=future_stack)
        assert len(stacked) > len(stacked.dropna())

        if future_stack:
            with pytest.raises(ValueError, match="dropna must be unspecified"):
                df.unstack().stack(dropna=True, future_stack=future_stack)
        else:
            stacked = df.unstack().stack(dropna=True, future_stack=future_stack)
            tm.assert_frame_equal(stacked, stacked.dropna())

    def test_unstack_multiple_hierarchical(self, future_stack):
        # smoke test: unstacking two named index levels of an all-NaN frame
        df = DataFrame(
            index=[
                [0, 0, 0, 0, 1, 1, 1, 1],
                [0, 0, 1, 1, 0, 0, 1, 1],
                [0, 1, 0, 1, 0, 1, 0, 1],
            ],
            columns=[[0, 0, 1, 1], [0, 1, 0, 1]],
        )

        df.index.names = ["a", "b", "c"]
        df.columns.names = ["d", "e"]

        # it works!
        df.unstack(["b", "c"])

    def test_unstack_sparse_keyspace(self):
        # memory problems with naive impl GH#2278
        # Generate Long File & Test Pivot
        NUM_ROWS = 1000

        df = DataFrame(
            {
                "A": np.random.default_rng(2).integers(100, size=NUM_ROWS),
                "B": np.random.default_rng(3).integers(300, size=NUM_ROWS),
                "C": np.random.default_rng(4).integers(-7, 7, size=NUM_ROWS),
                "D": np.random.default_rng(5).integers(-19, 19, size=NUM_ROWS),
                "E": np.random.default_rng(6).integers(3000, size=NUM_ROWS),
                "F": np.random.default_rng(7).standard_normal(NUM_ROWS),
            }
        )

        idf = df.set_index(["A", "B", "C", "D", "E"])

        # it works! is sufficient
        idf.unstack("E")

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_unstack_unobserved_keys(self, future_stack):
        # related to GH#2278 refactoring
        # level values 1 and 3 are never observed in the codes
        levels = [[0, 1], [0, 1, 2, 3]]
        codes = [[0, 0, 1, 1], [0, 2, 0, 2]]

        index = MultiIndex(levels, codes)

        df = DataFrame(np.random.default_rng(2).standard_normal((4, 2)), index=index)

        result = df.unstack()
        assert len(result.columns) == 4

        recons = result.stack(future_stack=future_stack)
        tm.assert_frame_equal(recons, df)

    @pytest.mark.slow
    def test_unstack_number_of_levels_larger_than_int32(self, monkeypatch):
        # GH#20601
        # GH 26314: Change ValueError to PerformanceWarning

        # patched _Unstacker aborts after __init__ so only the warning is exercised
        class MockUnstacker(reshape_lib._Unstacker):
            def __init__(self, *args, **kwargs) -> None:
                # __init__ will raise the warning
                super().__init__(*args, **kwargs)
                raise Exception("Don't compute final result.")

        with monkeypatch.context() as m:
            m.setattr(reshape_lib, "_Unstacker", MockUnstacker)
            df = DataFrame(
                np.zeros((2**16, 2)),
                index=[np.arange(2**16), np.arange(2**16)],
            )
            msg = "The following operation may generate"
            with tm.assert_produces_warning(PerformanceWarning, match=msg):
                with pytest.raises(Exception, match="Don't compute final result."):
                    df.unstack()

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.parametrize(
        "levels",
        itertools.chain.from_iterable(
            itertools.product(itertools.permutations([0, 1, 2], width), repeat=2)
            for width in [2, 3]
        ),
    )
    @pytest.mark.parametrize("stack_lev", range(2))
    @pytest.mark.parametrize("sort", [True, False])
    def test_stack_order_with_unsorted_levels(
        self, levels, stack_lev, sort, future_stack
    ):
        # GH#16323
        # deep check for 1-row case
        columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
        df = DataFrame(columns=columns, data=[range(4)])
        kwargs = {} if future_stack else {"sort": sort}
        df_stacked = df.stack(stack_lev, future_stack=future_stack, **kwargs)
        # every (row, column) cell must be findable at its stacked coordinates
        for row in df.index:
            for col in df.columns:
                expected = df.loc[row, col]
                result_row = row, col[stack_lev]
                result_col = col[1 - stack_lev]
                result = df_stacked.loc[result_row, result_col]
                assert result == expected

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_order_with_unsorted_levels_multi_row(self, future_stack):
        # GH#16323

        # check multi-row case
        mi = MultiIndex(
            levels=[["A", "C", "B"], ["B", "A", "C"]],
            codes=[np.repeat(range(3), 3), np.tile(range(3), 3)],
        )
        df = DataFrame(
            columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1)
        )
        assert all(
            df.loc[row, col]
            == df.stack(0, future_stack=future_stack).loc[(row, col[0]), col[1]]
            for row in df.index
            for col in df.columns
        )

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_order_with_unsorted_levels_multi_row_2(self, future_stack):
        # GH#53636
        levels = ((0, 1), (1, 0))
        stack_lev = 1
        columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
        df = DataFrame(columns=columns, data=[range(4)], index=[1, 0, 2, 3])
        kwargs = {} if future_stack else {"sort": True}
        result = df.stack(stack_lev, future_stack=future_stack, **kwargs)
        expected_index = MultiIndex(
            levels=[[0, 1, 2, 3], [0, 1]],
            codes=[[1, 1, 0, 0, 2, 2, 3, 3], [1, 0, 1, 0, 1, 0, 1, 0]],
        )
        expected = DataFrame(
            {
                0: [0, 1, 0, 1, 0, 1, 0, 1],
                1: [2, 3, 2, 3, 2, 3, 2, 3],
            },
            index=expected_index,
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    def test_stack_unstack_unordered_multiindex(self, future_stack):
        # GH# 18265
        values = np.arange(5)
        data = np.vstack(
            [
                [f"b{x}" for x in values],  # b0, b1, ..
                [f"a{x}" for 
x in values], # a0, a1, ..\n ]\n )\n df = DataFrame(data.T, columns=["b", "a"])\n df.columns.name = "first"\n second_level_dict = {"x": df}\n multi_level_df = pd.concat(second_level_dict, axis=1)\n multi_level_df.columns.names = ["second", "first"]\n df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1)\n result = df.stack(["first", "second"], future_stack=future_stack).unstack(\n ["first", "second"]\n )\n expected = DataFrame(\n [["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]],\n index=[0, 1, 2, 3, 4],\n columns=MultiIndex.from_tuples(\n [("a", "x"), ("b", "x")], names=["first", "second"]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_preserve_types(\n self, multiindex_year_month_day_dataframe_random_data, using_infer_string\n ):\n # GH#403\n ymd = multiindex_year_month_day_dataframe_random_data\n ymd["E"] = "foo"\n ymd["F"] = 2\n\n unstacked = ymd.unstack("month")\n assert unstacked["A", 1].dtype == np.float64\n assert (\n unstacked["E", 1].dtype == np.object_\n if not using_infer_string\n else "string"\n )\n assert unstacked["F", 1].dtype == np.float64\n\n def test_unstack_group_index_overflow(self, future_stack):\n codes = np.tile(np.arange(500), 2)\n level = np.arange(500)\n\n index = MultiIndex(\n levels=[level] * 8 + [[0, 1]],\n codes=[codes] * 8 + [np.arange(2).repeat(500)],\n )\n\n s = Series(np.arange(1000), index=index)\n result = s.unstack()\n assert result.shape == (500, 2)\n\n # test roundtrip\n stacked = result.stack(future_stack=future_stack)\n tm.assert_series_equal(s, stacked.reindex(s.index))\n\n # put it at beginning\n index = MultiIndex(\n levels=[[0, 1]] + [level] * 8,\n codes=[np.arange(2).repeat(500)] + [codes] * 8,\n )\n\n s = Series(np.arange(1000), index=index)\n result = s.unstack(0)\n assert result.shape == (500, 2)\n\n # put it in middle\n index = MultiIndex(\n levels=[level] * 4 + [[0, 1]] + [level] * 4,\n codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4),\n 
)\n\n s = Series(np.arange(1000), index=index)\n result = s.unstack(4)\n assert result.shape == (500, 2)\n\n def test_unstack_with_missing_int_cast_to_float(self, using_array_manager):\n # https://github.com/pandas-dev/pandas/issues/37115\n df = DataFrame(\n {\n "a": ["A", "A", "B"],\n "b": ["ca", "cb", "cb"],\n "v": [10] * 3,\n }\n ).set_index(["a", "b"])\n\n # add another int column to get 2 blocks\n df["is_"] = 1\n if not using_array_manager:\n assert len(df._mgr.blocks) == 2\n\n result = df.unstack("b")\n result[("is_", "ca")] = result[("is_", "ca")].fillna(0)\n\n expected = DataFrame(\n [[10.0, 10.0, 1.0, 1.0], [np.nan, 10.0, 0.0, 1.0]],\n index=Index(["A", "B"], name="a"),\n columns=MultiIndex.from_tuples(\n [("v", "ca"), ("v", "cb"), ("is_", "ca"), ("is_", "cb")],\n names=[None, "b"],\n ),\n )\n if using_array_manager:\n # INFO(ArrayManager) with ArrayManager preserve dtype where possible\n expected[("v", "cb")] = expected[("v", "cb")].astype("int64")\n expected[("is_", "cb")] = expected[("is_", "cb")].astype("int64")\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_with_level_has_nan(self):\n # GH 37510\n df1 = DataFrame(\n {\n "L1": [1, 2, 3, 4],\n "L2": [3, 4, 1, 2],\n "L3": [1, 1, 1, 1],\n "x": [1, 2, 3, 4],\n }\n )\n df1 = df1.set_index(["L1", "L2", "L3"])\n new_levels = ["n1", "n2", "n3", None]\n df1.index = df1.index.set_levels(levels=new_levels, level="L1")\n df1.index = df1.index.set_levels(levels=new_levels, level="L2")\n\n result = df1.unstack("L3")[("x", 1)].sort_index().index\n expected = MultiIndex(\n levels=[["n1", "n2", "n3", None], ["n1", "n2", "n3", None]],\n codes=[[0, 1, 2, 3], [2, 3, 0, 1]],\n names=["L1", "L2"],\n )\n\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_nan_in_multiindex_columns(self, future_stack):\n # GH#39481\n df = DataFrame(\n np.zeros([1, 5]),\n columns=MultiIndex.from_tuples(\n [\n (0, None, 
None),\n (0, 2, 0),\n (0, 2, 1),\n (0, 3, 0),\n (0, 3, 1),\n ],\n ),\n )\n result = df.stack(2, future_stack=future_stack)\n if future_stack:\n index = MultiIndex(levels=[[0], [0.0, 1.0]], codes=[[0, 0, 0], [-1, 0, 1]])\n columns = MultiIndex(levels=[[0], [2, 3]], codes=[[0, 0, 0], [-1, 0, 1]])\n else:\n index = Index([(0, None), (0, 0), (0, 1)])\n columns = Index([(0, None), (0, 2), (0, 3)])\n expected = DataFrame(\n [[0.0, np.nan, np.nan], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]],\n index=index,\n columns=columns,\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_multi_level_stack_categorical(self, future_stack):\n # GH 15239\n midx = MultiIndex.from_arrays(\n [\n ["A"] * 2 + ["B"] * 2,\n pd.Categorical(list("abab")),\n pd.Categorical(list("ccdd")),\n ]\n )\n df = DataFrame(np.arange(8).reshape(2, 4), columns=midx)\n result = df.stack([1, 2], future_stack=future_stack)\n if future_stack:\n expected = DataFrame(\n [\n [0, np.nan],\n [1, np.nan],\n [np.nan, 2],\n [np.nan, 3],\n [4, np.nan],\n [5, np.nan],\n [np.nan, 6],\n [np.nan, 7],\n ],\n columns=["A", "B"],\n index=MultiIndex.from_arrays(\n [\n [0] * 4 + [1] * 4,\n pd.Categorical(list("abababab")),\n pd.Categorical(list("ccddccdd")),\n ]\n ),\n )\n else:\n expected = DataFrame(\n [\n [0, np.nan],\n [np.nan, 2],\n [1, np.nan],\n [np.nan, 3],\n [4, np.nan],\n [np.nan, 6],\n [5, np.nan],\n [np.nan, 7],\n ],\n columns=["A", "B"],\n index=MultiIndex.from_arrays(\n [\n [0] * 4 + [1] * 4,\n pd.Categorical(list("aabbaabb")),\n pd.Categorical(list("cdcdcdcd")),\n ]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_nan_level(self, future_stack):\n # GH 9406\n df_nan = DataFrame(\n np.arange(4).reshape(2, 2),\n columns=MultiIndex.from_tuples(\n [("A", np.nan), ("B", "b")], names=["Upper", "Lower"]\n ),\n 
index=Index([0, 1], name="Num"),\n dtype=np.float64,\n )\n result = df_nan.stack(future_stack=future_stack)\n if future_stack:\n index = MultiIndex(\n levels=[[0, 1], [np.nan, "b"]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=["Num", "Lower"],\n )\n else:\n index = MultiIndex.from_tuples(\n [(0, np.nan), (0, "b"), (1, np.nan), (1, "b")], names=["Num", "Lower"]\n )\n expected = DataFrame(\n [[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]],\n columns=Index(["A", "B"], name="Upper"),\n index=index,\n )\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_categorical_columns(self):\n # GH 14018\n idx = MultiIndex.from_product([["A"], [0, 1]])\n df = DataFrame({"cat": pd.Categorical(["a", "b"])}, index=idx)\n result = df.unstack()\n expected = DataFrame(\n {\n 0: pd.Categorical(["a"], categories=["a", "b"]),\n 1: pd.Categorical(["b"], categories=["a", "b"]),\n },\n index=["A"],\n )\n expected.columns = MultiIndex.from_tuples([("cat", 0), ("cat", 1)])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_unsorted(self, future_stack):\n # GH 16925\n PAE = ["ITA", "FRA"]\n VAR = ["A1", "A2"]\n TYP = ["CRT", "DBT", "NET"]\n MI = MultiIndex.from_product([PAE, VAR, TYP], names=["PAE", "VAR", "TYP"])\n\n V = list(range(len(MI)))\n DF = DataFrame(data=V, index=MI, columns=["VALUE"])\n\n DF = DF.unstack(["VAR", "TYP"])\n DF.columns = DF.columns.droplevel(0)\n DF.loc[:, ("A0", "NET")] = 9999\n\n result = DF.stack(["VAR", "TYP"], future_stack=future_stack).sort_index()\n expected = (\n DF.sort_index(axis=1)\n .stack(["VAR", "TYP"], future_stack=future_stack)\n .sort_index()\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n def test_stack_nullable_dtype(self, future_stack):\n # GH#43561\n columns = MultiIndex.from_product(\n [["54511", "54515"], ["r", 
"t_mean"]], names=["station", "element"]\n )\n index = Index([1, 2, 3], name="time")\n\n arr = np.array([[50, 226, 10, 215], [10, 215, 9, 220], [305, 232, 111, 220]])\n df = DataFrame(arr, columns=columns, index=index, dtype=pd.Int64Dtype())\n\n result = df.stack("station", future_stack=future_stack)\n\n expected = (\n df.astype(np.int64)\n .stack("station", future_stack=future_stack)\n .astype(pd.Int64Dtype())\n )\n tm.assert_frame_equal(result, expected)\n\n # non-homogeneous case\n df[df.columns[0]] = df[df.columns[0]].astype(pd.Float64Dtype())\n result = df.stack("station", future_stack=future_stack)\n\n expected = DataFrame(\n {\n "r": pd.array(\n [50.0, 10.0, 10.0, 9.0, 305.0, 111.0], dtype=pd.Float64Dtype()\n ),\n "t_mean": pd.array(\n [226, 215, 215, 220, 232, 220], dtype=pd.Int64Dtype()\n ),\n },\n index=MultiIndex.from_product([index, columns.levels[0]]),\n )\n expected.columns.name = "element"\n tm.assert_frame_equal(result, expected)\n\n def test_unstack_mixed_level_names(self):\n # GH#48763\n arrays = [["a", "a"], [1, 2], ["red", "blue"]]\n idx = MultiIndex.from_arrays(arrays, names=("x", 0, "y"))\n df = DataFrame({"m": [1, 2]}, index=idx)\n result = df.unstack("x")\n expected = DataFrame(\n [[1], [2]],\n columns=MultiIndex.from_tuples([("m", "a")], names=[None, "x"]),\n index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_stack_tuple_columns(future_stack):\n # GH#54948 - test stack when the input has a non-MultiIndex with tuples\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[("a", 1), ("a", 2), ("b", 1)]\n )\n result = df.stack(future_stack=future_stack)\n expected = Series(\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n index=MultiIndex(\n levels=[[0, 1, 2], [("a", 1), ("a", 2), ("b", 1)]],\n codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dtype, na_value",\n [\n 
("float64", np.nan),\n ("Float64", np.nan),\n ("Float64", pd.NA),\n ("Int64", pd.NA),\n ],\n)\n@pytest.mark.parametrize("test_multiindex", [True, False])\ndef test_stack_preserves_na(dtype, na_value, test_multiindex):\n # GH#56573\n if test_multiindex:\n index = MultiIndex.from_arrays(2 * [Index([na_value], dtype=dtype)])\n else:\n index = Index([na_value], dtype=dtype)\n df = DataFrame({"a": [1]}, index=index)\n result = df.stack(future_stack=True)\n\n if test_multiindex:\n expected_index = MultiIndex.from_arrays(\n [\n Index([na_value], dtype=dtype),\n Index([na_value], dtype=dtype),\n Index(["a"]),\n ]\n )\n else:\n expected_index = MultiIndex.from_arrays(\n [\n Index([na_value], dtype=dtype),\n Index(["a"]),\n ]\n )\n expected = Series(1, index=expected_index)\n tm.assert_series_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_stack_unstack.py
test_stack_unstack.py
Python
97,558
0.75
0.065946
0.06438
node-utils
600
2024-03-03T03:25:24.547203
MIT
true
24c06eefecc5df6e846c4a932c881d75
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"\n)\n\n\n@pytest.fixture()\ndef gpd_style_subclass_df():\n class SubclassedDataFrame(DataFrame):\n @property\n def _constructor(self):\n return SubclassedDataFrame\n\n return SubclassedDataFrame({"a": [1, 2, 3]})\n\n\nclass TestDataFrameSubclassing:\n def test_no_warning_on_mgr(self):\n # GH#57032\n df = tm.SubclassedDataFrame(\n {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"]\n )\n with tm.assert_produces_warning(None):\n # df.isna() goes through _constructor_from_mgr, which we want to\n # *not* pass a Manager do __init__\n df.isna()\n df["X"].isna()\n\n def test_frame_subclassing_and_slicing(self):\n # Subclass frame and ensure it returns the right class on slicing it\n # In reference to PR 9632\n\n class CustomSeries(Series):\n @property\n def _constructor(self):\n return CustomSeries\n\n def custom_series_function(self):\n return "OK"\n\n class CustomDataFrame(DataFrame):\n """\n Subclasses pandas DF, fills DF with simulation results, adds some\n custom plotting functions.\n """\n\n def __init__(self, *args, **kw) -> None:\n super().__init__(*args, **kw)\n\n @property\n def _constructor(self):\n return CustomDataFrame\n\n _constructor_sliced = CustomSeries\n\n def custom_frame_function(self):\n return "OK"\n\n data = {"col1": range(10), "col2": range(10)}\n cdf = CustomDataFrame(data)\n\n # Did we get back our own DF class?\n assert isinstance(cdf, CustomDataFrame)\n\n # Do we get back our own Series class after selecting a column?\n cdf_series = cdf.col1\n assert isinstance(cdf_series, CustomSeries)\n assert cdf_series.custom_series_function() == "OK"\n\n # Do we get back our own DF class after slicing row-wise?\n cdf_rows = cdf[1:5]\n assert isinstance(cdf_rows, 
CustomDataFrame)\n assert cdf_rows.custom_frame_function() == "OK"\n\n # Make sure sliced part of multi-index frame is custom class\n mcol = MultiIndex.from_tuples([("A", "A"), ("A", "B")])\n cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)\n assert isinstance(cdf_multi["A"], CustomDataFrame)\n\n mcol = MultiIndex.from_tuples([("A", ""), ("B", "")])\n cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)\n assert isinstance(cdf_multi2["A"], CustomSeries)\n\n def test_dataframe_metadata(self):\n df = tm.SubclassedDataFrame(\n {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"]\n )\n df.testattr = "XXX"\n\n assert df.testattr == "XXX"\n assert df[["X"]].testattr == "XXX"\n assert df.loc[["a", "b"], :].testattr == "XXX"\n assert df.iloc[[0, 1], :].testattr == "XXX"\n\n # see gh-9776\n assert df.iloc[0:1, :].testattr == "XXX"\n\n # see gh-10553\n unpickled = tm.round_trip_pickle(df)\n tm.assert_frame_equal(df, unpickled)\n assert df._metadata == unpickled._metadata\n assert df.testattr == unpickled.testattr\n\n def test_indexing_sliced(self):\n # GH 11559\n df = tm.SubclassedDataFrame(\n {"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"]\n )\n res = df.loc[:, "X"]\n exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.iloc[:, 1]\n exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.loc[:, "Z"]\n exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.loc["a", :]\n exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.iloc[1, :]\n exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b")\n 
tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.loc["c", :]\n exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n def test_subclass_attr_err_propagation(self):\n # GH 11808\n class A(DataFrame):\n @property\n def nonexistence(self):\n return self.i_dont_exist\n\n with pytest.raises(AttributeError, match=".*i_dont_exist.*"):\n A().nonexistence\n\n def test_subclass_align(self):\n # GH 12983\n df1 = tm.SubclassedDataFrame(\n {"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")\n )\n df2 = tm.SubclassedDataFrame(\n {"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD")\n )\n\n res1, res2 = df1.align(df2, axis=0)\n exp1 = tm.SubclassedDataFrame(\n {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},\n index=list("ABCDE"),\n )\n exp2 = tm.SubclassedDataFrame(\n {"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]},\n index=list("ABCDE"),\n )\n assert isinstance(res1, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res1, exp1)\n assert isinstance(res2, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res2, exp2)\n\n res1, res2 = df1.a.align(df2.c)\n assert isinstance(res1, tm.SubclassedSeries)\n tm.assert_series_equal(res1, exp1.a)\n assert isinstance(res2, tm.SubclassedSeries)\n tm.assert_series_equal(res2, exp2.c)\n\n def test_subclass_align_combinations(self):\n # GH 12983\n df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))\n s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x")\n\n # frame + series\n res1, res2 = df.align(s, axis=0)\n exp1 = tm.SubclassedDataFrame(\n {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},\n index=list("ABCDE"),\n )\n # name is lost when\n exp2 = tm.SubclassedSeries(\n [1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x"\n )\n\n assert isinstance(res1, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res1, exp1)\n assert 
isinstance(res2, tm.SubclassedSeries)\n tm.assert_series_equal(res2, exp2)\n\n # series + frame\n res1, res2 = s.align(df)\n assert isinstance(res1, tm.SubclassedSeries)\n tm.assert_series_equal(res1, exp2)\n assert isinstance(res2, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res2, exp1)\n\n def test_subclass_iterrows(self):\n # GH 13977\n df = tm.SubclassedDataFrame({"a": [1]})\n for i, row in df.iterrows():\n assert isinstance(row, tm.SubclassedSeries)\n tm.assert_series_equal(row, df.loc[i])\n\n def test_subclass_stack(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=["a", "b", "c"],\n columns=["X", "Y", "Z"],\n )\n\n res = df.stack(future_stack=True)\n exp = tm.SubclassedSeries(\n [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")]\n )\n\n tm.assert_series_equal(res, exp)\n\n def test_subclass_stack_multi(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],\n index=MultiIndex.from_tuples(\n list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 12],\n [11, 13],\n [20, 22],\n [21, 23],\n [30, 32],\n [31, 33],\n [40, 42],\n [41, 43],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),\n names=["aaa", "ccc", "yyy"],\n ),\n columns=Index(["W", "X"], name="www"),\n )\n\n res = df.stack(future_stack=True)\n tm.assert_frame_equal(res, exp)\n\n res = df.stack("yyy", future_stack=True)\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 11],\n [12, 13],\n [20, 21],\n [22, 23],\n [30, 31],\n [32, 33],\n [40, 41],\n [42, 43],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))),\n names=["aaa", "ccc", "www"],\n ),\n columns=Index(["y", "z"], 
name="yyy"),\n )\n\n res = df.stack("www", future_stack=True)\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_stack_multi_mixed(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [\n [10, 11, 12.0, 13.0],\n [20, 21, 22.0, 23.0],\n [30, 31, 32.0, 33.0],\n [40, 41, 42.0, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 12.0],\n [11, 13.0],\n [20, 22.0],\n [21, 23.0],\n [30, 32.0],\n [31, 33.0],\n [40, 42.0],\n [41, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),\n names=["aaa", "ccc", "yyy"],\n ),\n columns=Index(["W", "X"], name="www"),\n )\n\n res = df.stack(future_stack=True)\n tm.assert_frame_equal(res, exp)\n\n res = df.stack("yyy", future_stack=True)\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [\n [10.0, 11.0],\n [12.0, 13.0],\n [20.0, 21.0],\n [22.0, 23.0],\n [30.0, 31.0],\n [32.0, 33.0],\n [40.0, 41.0],\n [42.0, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))),\n names=["aaa", "ccc", "www"],\n ),\n columns=Index(["y", "z"], name="yyy"),\n )\n\n res = df.stack("www", future_stack=True)\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_unstack(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=["a", "b", "c"],\n columns=["X", "Y", "Z"],\n )\n\n res = df.unstack()\n exp = tm.SubclassedSeries(\n [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")]\n )\n\n tm.assert_series_equal(res, exp)\n\n def test_subclass_unstack_multi(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],\n index=MultiIndex.from_tuples(\n list(zip(list("AABB"), list("cdcd"))), 
names=["aaa", "ccc"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [[10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]],\n index=Index(["A", "B"], name="aaa"),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))),\n names=["www", "yyy", "ccc"],\n ),\n )\n\n res = df.unstack()\n tm.assert_frame_equal(res, exp)\n\n res = df.unstack("ccc")\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [[10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]],\n index=Index(["c", "d"], name="ccc"),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))),\n names=["www", "yyy", "aaa"],\n ),\n )\n\n res = df.unstack("aaa")\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_unstack_multi_mixed(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [\n [10, 11, 12.0, 13.0],\n [20, 21, 22.0, 23.0],\n [30, 31, 32.0, 33.0],\n [40, 41, 42.0, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 20, 11, 21, 12.0, 22.0, 13.0, 23.0],\n [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0],\n ],\n index=Index(["A", "B"], name="aaa"),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))),\n names=["www", "yyy", "ccc"],\n ),\n )\n\n res = df.unstack()\n tm.assert_frame_equal(res, exp)\n\n res = df.unstack("ccc")\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0],\n [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0],\n ],\n index=Index(["c", "d"], name="ccc"),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))),\n 
names=["www", "yyy", "aaa"],\n ),\n )\n\n res = df.unstack("aaa")\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_pivot(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n {\n "index": ["A", "B", "C", "C", "B", "A"],\n "columns": ["One", "One", "One", "Two", "Two", "Two"],\n "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],\n }\n )\n\n pivoted = df.pivot(index="index", columns="columns", values="values")\n\n expected = tm.SubclassedDataFrame(\n {\n "One": {"A": 1.0, "B": 2.0, "C": 3.0},\n "Two": {"A": 1.0, "B": 2.0, "C": 3.0},\n }\n )\n\n expected.index.name, expected.columns.name = "index", "columns"\n\n tm.assert_frame_equal(pivoted, expected)\n\n def test_subclassed_melt(self):\n # GH 15564\n cheese = tm.SubclassedDataFrame(\n {\n "first": ["John", "Mary"],\n "last": ["Doe", "Bo"],\n "height": [5.5, 6.0],\n "weight": [130, 150],\n }\n )\n\n melted = pd.melt(cheese, id_vars=["first", "last"])\n\n expected = tm.SubclassedDataFrame(\n [\n ["John", "Doe", "height", 5.5],\n ["Mary", "Bo", "height", 6.0],\n ["John", "Doe", "weight", 130],\n ["Mary", "Bo", "weight", 150],\n ],\n columns=["first", "last", "variable", "value"],\n )\n\n tm.assert_frame_equal(melted, expected)\n\n def test_subclassed_wide_to_long(self):\n # GH 9762\n\n x = np.random.default_rng(2).standard_normal(3)\n df = tm.SubclassedDataFrame(\n {\n "A1970": {0: "a", 1: "b", 2: "c"},\n "A1980": {0: "d", 1: "e", 2: "f"},\n "B1970": {0: 2.5, 1: 1.2, 2: 0.7},\n "B1980": {0: 3.2, 1: 1.3, 2: 0.1},\n "X": dict(zip(range(3), x)),\n }\n )\n\n df["id"] = df.index\n exp_data = {\n "X": x.tolist() + x.tolist(),\n "A": ["a", "b", "c", "d", "e", "f"],\n "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],\n "year": [1970, 1970, 1970, 1980, 1980, 1980],\n "id": [0, 1, 2, 0, 1, 2],\n }\n expected = tm.SubclassedDataFrame(exp_data)\n expected = expected.set_index(["id", "year"])[["X", "A", "B"]]\n long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year")\n\n tm.assert_frame_equal(long_frame, expected)\n\n def 
test_subclassed_apply(self):\n # GH 19822\n\n def check_row_subclass(row):\n assert isinstance(row, tm.SubclassedSeries)\n\n def stretch(row):\n if row["variable"] == "height":\n row["value"] += 0.5\n return row\n\n df = tm.SubclassedDataFrame(\n [\n ["John", "Doe", "height", 5.5],\n ["Mary", "Bo", "height", 6.0],\n ["John", "Doe", "weight", 130],\n ["Mary", "Bo", "weight", 150],\n ],\n columns=["first", "last", "variable", "value"],\n )\n\n df.apply(lambda x: check_row_subclass(x))\n df.apply(lambda x: check_row_subclass(x), axis=1)\n\n expected = tm.SubclassedDataFrame(\n [\n ["John", "Doe", "height", 6.0],\n ["Mary", "Bo", "height", 6.5],\n ["John", "Doe", "weight", 130],\n ["Mary", "Bo", "weight", 150],\n ],\n columns=["first", "last", "variable", "value"],\n )\n\n result = df.apply(lambda x: stretch(x), axis=1)\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n expected = tm.SubclassedDataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])\n\n result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1)\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n expected = tm.SubclassedSeries([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])\n\n result = df.apply(lambda x: [1, 2, 3], axis=1)\n assert not isinstance(result, tm.SubclassedDataFrame)\n tm.assert_series_equal(result, expected)\n\n def test_subclassed_reductions(self, all_reductions):\n # GH 25596\n\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n result = getattr(df, all_reductions)()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_subclassed_count(self):\n df = tm.SubclassedDataFrame(\n {\n "Person": ["John", "Myla", "Lewis", "John", "Myla"],\n "Age": [24.0, np.nan, 21.0, 33, 26],\n "Single": [False, 
True, True, True, False],\n }\n )\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame({"A": [1, 0, 3], "B": [0, 5, 6], "C": [7, 8, 0]})\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame(\n [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],\n index=MultiIndex.from_tuples(\n list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]\n ),\n )\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame()\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_isin(self):\n df = tm.SubclassedDataFrame(\n {"num_legs": [2, 4], "num_wings": [2, 0]}, index=["falcon", "dog"]\n )\n result = df.isin([0, 2])\n assert isinstance(result, tm.SubclassedDataFrame)\n\n def test_duplicated(self):\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n result = df.duplicated()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame()\n result = df.duplicated()\n assert isinstance(result, tm.SubclassedSeries)\n\n @pytest.mark.parametrize("idx_method", ["idxmax", "idxmin"])\n def test_idx(self, idx_method):\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n result = getattr(df, idx_method)()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_dot(self):\n df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n s = tm.SubclassedSeries([1, 1, 2, 1])\n result = df.dot(s)\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n s = tm.SubclassedDataFrame([1, 1, 2, 1])\n result = df.dot(s)\n assert isinstance(result, tm.SubclassedDataFrame)\n\n def test_memory_usage(self):\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 
8, 9]})\n result = df.memory_usage()\n assert isinstance(result, tm.SubclassedSeries)\n\n result = df.memory_usage(index=False)\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_corrwith(self):\n pytest.importorskip("scipy")\n index = ["a", "b", "c", "d", "e"]\n columns = ["one", "two", "three", "four"]\n df1 = tm.SubclassedDataFrame(\n np.random.default_rng(2).standard_normal((5, 4)),\n index=index,\n columns=columns,\n )\n df2 = tm.SubclassedDataFrame(\n np.random.default_rng(2).standard_normal((4, 4)),\n index=index[:4],\n columns=columns,\n )\n correls = df1.corrwith(df2, axis=1, drop=True, method="kendall")\n\n assert isinstance(correls, (tm.SubclassedSeries))\n\n def test_asof(self):\n N = 3\n rng = pd.date_range("1/1/1990", periods=N, freq="53s")\n df = tm.SubclassedDataFrame(\n {\n "A": [np.nan, np.nan, np.nan],\n "B": [np.nan, np.nan, np.nan],\n "C": [np.nan, np.nan, np.nan],\n },\n index=rng,\n )\n\n result = df.asof(rng[-2:])\n assert isinstance(result, tm.SubclassedDataFrame)\n\n result = df.asof(rng[-2])\n assert isinstance(result, tm.SubclassedSeries)\n\n result = df.asof("1989-12-31")\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_idxmin_preserves_subclass(self):\n # GH 28330\n\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n result = df.idxmin()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_idxmax_preserves_subclass(self):\n # GH 28330\n\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n result = df.idxmax()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df):\n # GH 43668\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n result = df.convert_dtypes()\n assert isinstance(result, tm.SubclassedDataFrame)\n\n result = gpd_style_subclass_df.convert_dtypes()\n assert isinstance(result, type(gpd_style_subclass_df))\n\n def 
test_astype_preserves_subclass(self):\n # GH#40810\n df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n\n result = df.astype({"A": np.int64, "B": np.int32, "C": np.float64})\n assert isinstance(result, tm.SubclassedDataFrame)\n\n def test_equals_subclass(self):\n # https://github.com/pandas-dev/pandas/pull/34402\n # allow subclass in both directions\n df1 = DataFrame({"a": [1, 2, 3]})\n df2 = tm.SubclassedDataFrame({"a": [1, 2, 3]})\n assert df1.equals(df2)\n assert df2.equals(df1)\n\n def test_replace_list_method(self):\n # https://github.com/pandas-dev/pandas/pull/46018\n df = tm.SubclassedDataFrame({"A": [0, 1, 2]})\n msg = "The 'method' keyword in SubclassedDataFrame.replace is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=msg, raise_on_extra_warnings=False\n ):\n result = df.replace([1, 2], method="ffill")\n expected = tm.SubclassedDataFrame({"A": [0, 0, 0]})\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n\nclass MySubclassWithMetadata(DataFrame):\n _metadata = ["my_metadata"]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n my_metadata = kwargs.pop("my_metadata", None)\n if args and isinstance(args[0], MySubclassWithMetadata):\n my_metadata = args[0].my_metadata # type: ignore[has-type]\n self.my_metadata = my_metadata\n\n @property\n def _constructor(self):\n return MySubclassWithMetadata\n\n\ndef test_constructor_with_metadata():\n # https://github.com/pandas-dev/pandas/pull/54922\n # https://github.com/pandas-dev/pandas/issues/55120\n df = MySubclassWithMetadata(\n np.random.default_rng(2).random((5, 3)), columns=["A", "B", "C"]\n )\n subset = df[["A", "B"]]\n assert isinstance(subset, MySubclassWithMetadata)\n\n\nclass SimpleDataFrameSubClass(DataFrame):\n """A subclass of DataFrame that does not define a constructor."""\n\n\nclass SimpleSeriesSubClass(Series):\n """A subclass of Series that does not define a 
constructor."""\n\n\nclass TestSubclassWithoutConstructor:\n def test_copy_df(self):\n expected = DataFrame({"a": [1, 2, 3]})\n result = SimpleDataFrameSubClass(expected).copy()\n\n assert (\n type(result) is DataFrame\n ) # assert_frame_equal only checks isinstance(lhs, type(rhs))\n tm.assert_frame_equal(result, expected)\n\n def test_copy_series(self):\n expected = Series([1, 2, 3])\n result = SimpleSeriesSubClass(expected).copy()\n\n tm.assert_series_equal(result, expected)\n\n def test_series_to_frame(self):\n orig = Series([1, 2, 3])\n expected = orig.to_frame()\n result = SimpleSeriesSubClass(orig).to_frame()\n\n assert (\n type(result) is DataFrame\n ) # assert_frame_equal only checks isinstance(lhs, type(rhs))\n tm.assert_frame_equal(result, expected)\n\n def test_groupby(self):\n df = SimpleDataFrameSubClass(DataFrame({"a": [1, 2, 3]}))\n\n for _, v in df.groupby("a"):\n assert type(v) is DataFrame\n
.venv\Lib\site-packages\pandas\tests\frame\test_subclass.py
test_subclass.py
Python
27,880
0.95
0.082424
0.057437
node-utils
15
2023-07-13T06:55:13.058694
Apache-2.0
true
ec3aa61954da1c0ebfa16bd67905e086
from functools import partial\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import is_extension_array_dtype\n\ndtypes = [\n "int64",\n "Int64",\n {"A": "int64", "B": "Int64"},\n]\n\n\n@pytest.mark.parametrize("dtype", dtypes)\ndef test_unary_unary(dtype):\n # unary input, unary output\n values = np.array([[-1, -1], [1, 1]], dtype="int64")\n df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype)\n result = np.positive(df)\n expected = pd.DataFrame(\n np.positive(values), index=df.index, columns=df.columns\n ).astype(dtype)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", dtypes)\ndef test_unary_binary(request, dtype):\n # unary input, binary output\n if is_extension_array_dtype(dtype) or isinstance(dtype, dict):\n request.applymarker(\n pytest.mark.xfail(\n reason="Extension / mixed with multiple outputs not implemented."\n )\n )\n\n values = np.array([[-1, -1], [1, 1]], dtype="int64")\n df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype)\n result_pandas = np.modf(df)\n assert isinstance(result_pandas, tuple)\n assert len(result_pandas) == 2\n expected_numpy = np.modf(values)\n\n for result, b in zip(result_pandas, expected_numpy):\n expected = pd.DataFrame(b, index=df.index, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", dtypes)\ndef test_binary_input_dispatch_binop(dtype):\n # binop ufuncs are dispatched to our dunder methods.\n values = np.array([[-1, -1], [1, 1]], dtype="int64")\n df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype)\n result = np.add(df, df)\n expected = pd.DataFrame(\n np.add(values, values), index=df.index, columns=df.columns\n ).astype(dtype)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "func,arg,expected",\n [\n (np.add, 1, [2, 3, 4, 5]),\n (\n partial(np.add, 
where=[[False, True], [True, False]]),\n np.array([[1, 1], [1, 1]]),\n [0, 3, 4, 0],\n ),\n (np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]),\n (np.subtract, 2, [-1, 0, 1, 2]),\n (\n partial(np.negative, where=np.array([[False, True], [True, False]])),\n None,\n [0, -2, -3, 0],\n ),\n ],\n)\ndef test_ufunc_passes_args(func, arg, expected):\n # GH#40662\n arr = np.array([[1, 2], [3, 4]])\n df = pd.DataFrame(arr)\n result_inplace = np.zeros_like(arr)\n # 1-argument ufunc\n if arg is None:\n result = func(df, out=result_inplace)\n else:\n result = func(df, arg, out=result_inplace)\n\n expected = np.array(expected).reshape(2, 2)\n tm.assert_numpy_array_equal(result_inplace, expected)\n\n expected = pd.DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype_a", dtypes)\n@pytest.mark.parametrize("dtype_b", dtypes)\ndef test_binary_input_aligns_columns(request, dtype_a, dtype_b):\n if (\n is_extension_array_dtype(dtype_a)\n or isinstance(dtype_a, dict)\n or is_extension_array_dtype(dtype_b)\n or isinstance(dtype_b, dict)\n ):\n request.applymarker(\n pytest.mark.xfail(\n reason="Extension / mixed with multiple inputs not implemented."\n )\n )\n\n df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}).astype(dtype_a)\n\n if isinstance(dtype_a, dict) and isinstance(dtype_b, dict):\n dtype_b = dtype_b.copy()\n dtype_b["C"] = dtype_b.pop("B")\n df2 = pd.DataFrame({"A": [1, 2], "C": [3, 4]}).astype(dtype_b)\n # As of 2.0, align first before applying the ufunc\n result = np.heaviside(df1, df2)\n expected = np.heaviside(\n np.array([[1, 3, np.nan], [2, 4, np.nan]]),\n np.array([[1, np.nan, 3], [2, np.nan, 4]]),\n )\n expected = pd.DataFrame(expected, index=[0, 1], columns=["A", "B", "C"])\n tm.assert_frame_equal(result, expected)\n\n result = np.heaviside(df1, df2.values)\n expected = pd.DataFrame([[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", dtypes)\ndef 
test_binary_input_aligns_index(request, dtype):\n if is_extension_array_dtype(dtype) or isinstance(dtype, dict):\n request.applymarker(\n pytest.mark.xfail(\n reason="Extension / mixed with multiple inputs not implemented."\n )\n )\n df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).astype(dtype)\n df2 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "c"]).astype(dtype)\n result = np.heaviside(df1, df2)\n expected = np.heaviside(\n np.array([[1, 3], [3, 4], [np.nan, np.nan]]),\n np.array([[1, 3], [np.nan, np.nan], [3, 4]]),\n )\n # TODO(FloatArray): this will be Float64Dtype.\n expected = pd.DataFrame(expected, index=["a", "b", "c"], columns=["A", "B"])\n tm.assert_frame_equal(result, expected)\n\n result = np.heaviside(df1, df2.values)\n expected = pd.DataFrame(\n [[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"], index=["a", "b"]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_binary_frame_series_raises():\n # We don't currently implement\n df = pd.DataFrame({"A": [1, 2]})\n with pytest.raises(NotImplementedError, match="logaddexp"):\n np.logaddexp(df, df["A"])\n\n with pytest.raises(NotImplementedError, match="logaddexp"):\n np.logaddexp(df["A"], df)\n\n\ndef test_unary_accumulate_axis():\n # https://github.com/pandas-dev/pandas/issues/39259\n df = pd.DataFrame({"a": [1, 3, 2, 4]})\n result = np.maximum.accumulate(df)\n expected = pd.DataFrame({"a": [1, 3, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({"a": [1, 3, 2, 4], "b": [0.1, 4.0, 3.0, 2.0]})\n result = np.maximum.accumulate(df)\n # in theory could preserve int dtype for default axis=0\n expected = pd.DataFrame({"a": [1.0, 3.0, 3.0, 4.0], "b": [0.1, 4.0, 4.0, 4.0]})\n tm.assert_frame_equal(result, expected)\n\n result = np.maximum.accumulate(df, axis=0)\n tm.assert_frame_equal(result, expected)\n\n result = np.maximum.accumulate(df, axis=1)\n expected = pd.DataFrame({"a": [1.0, 3.0, 2.0, 4.0], "b": [1.0, 4.0, 3.0, 4.0]})\n tm.assert_frame_equal(result, 
expected)\n\n\ndef test_frame_outer_disallowed():\n df = pd.DataFrame({"A": [1, 2]})\n with pytest.raises(NotImplementedError, match=""):\n # deprecation enforced in 2.0\n np.subtract.outer(df, df)\n\n\ndef test_alignment_deprecation_enforced():\n # Enforced in 2.0\n # https://github.com/pandas-dev/pandas/issues/39184\n df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]})\n s1 = pd.Series([1, 2], index=["a", "b"])\n s2 = pd.Series([1, 2], index=["b", "c"])\n\n # binary dataframe / dataframe\n expected = pd.DataFrame({"a": [2, 4, 6], "b": [8, 10, 12]})\n\n with tm.assert_produces_warning(None):\n # aligned -> no warning!\n result = np.add(df1, df1)\n tm.assert_frame_equal(result, expected)\n\n result = np.add(df1, df2.values)\n tm.assert_frame_equal(result, expected)\n\n result = np.add(df1, df2)\n expected = pd.DataFrame({"a": [np.nan] * 3, "b": [5, 7, 9], "c": [np.nan] * 3})\n tm.assert_frame_equal(result, expected)\n\n result = np.add(df1.values, df2)\n expected = pd.DataFrame({"b": [2, 4, 6], "c": [8, 10, 12]})\n tm.assert_frame_equal(result, expected)\n\n # binary dataframe / series\n expected = pd.DataFrame({"a": [2, 3, 4], "b": [6, 7, 8]})\n\n with tm.assert_produces_warning(None):\n # aligned -> no warning!\n result = np.add(df1, s1)\n tm.assert_frame_equal(result, expected)\n\n result = np.add(df1, s2.values)\n tm.assert_frame_equal(result, expected)\n\n expected = pd.DataFrame(\n {"a": [np.nan] * 3, "b": [5.0, 6.0, 7.0], "c": [np.nan] * 3}\n )\n result = np.add(df1, s2)\n tm.assert_frame_equal(result, expected)\n\n msg = "Cannot apply ufunc <ufunc 'add'> to mixed DataFrame and Series inputs."\n with pytest.raises(NotImplementedError, match=msg):\n np.add(s2, df1)\n\n\ndef test_alignment_deprecation_many_inputs_enforced():\n # Enforced in 2.0\n # https://github.com/pandas-dev/pandas/issues/39184\n # test that the deprecation also works with > 2 inputs -> using a numba\n # written ufunc for this because 
numpy itself doesn't have such ufuncs\n numba = pytest.importorskip("numba")\n\n @numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])\n def my_ufunc(x, y, z):\n return x + y + z\n\n df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]})\n df3 = pd.DataFrame({"a": [1, 2, 3], "c": [4, 5, 6]})\n\n result = my_ufunc(df1, df2, df3)\n expected = pd.DataFrame(np.full((3, 3), np.nan), columns=["a", "b", "c"])\n tm.assert_frame_equal(result, expected)\n\n # all aligned -> no warning\n with tm.assert_produces_warning(None):\n result = my_ufunc(df1, df1, df1)\n expected = pd.DataFrame([[3.0, 12.0], [6.0, 15.0], [9.0, 18.0]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n # mixed frame / arrays\n msg = (\n r"operands could not be broadcast together with shapes \(3,3\) \(3,3\) \(3,2\)"\n )\n with pytest.raises(ValueError, match=msg):\n my_ufunc(df1, df2, df3.values)\n\n # single frame -> no warning\n with tm.assert_produces_warning(None):\n result = my_ufunc(df1, df2.values, df3.values)\n tm.assert_frame_equal(result, expected)\n\n # takes indices of first frame\n msg = (\n r"operands could not be broadcast together with shapes \(3,2\) \(3,3\) \(3,3\)"\n )\n with pytest.raises(ValueError, match=msg):\n my_ufunc(df1.values, df2, df3)\n\n\ndef test_array_ufuncs_for_many_arguments():\n # GH39853\n def add3(x, y, z):\n return x + y + z\n\n ufunc = np.frompyfunc(add3, 3, 1)\n df = pd.DataFrame([[1, 2], [3, 4]])\n\n result = ufunc(df, df, 1)\n expected = pd.DataFrame([[3, 5], [7, 9]], dtype=object)\n tm.assert_frame_equal(result, expected)\n\n ser = pd.Series([1, 2])\n msg = (\n "Cannot apply ufunc <ufunc 'add3 (vectorized)'> "\n "to mixed DataFrame and Series inputs."\n )\n with pytest.raises(NotImplementedError, match=re.escape(msg)):\n ufunc(df, df, ser)\n
.venv\Lib\site-packages\pandas\tests\frame\test_ufunc.py
test_ufunc.py
Python
10,554
0.95
0.07074
0.103175
node-utils
992
2025-04-04T18:45:05.315193
GPL-3.0
true
7e8a6ebe99acb56714271f898e90a901
from decimal import Decimal\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_gte1p25\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass TestDataFrameUnaryOperators:\n # __pos__, __neg__, __invert__\n\n @pytest.mark.parametrize(\n "df,expected",\n [\n (pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})),\n (pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})),\n (\n pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),\n pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}),\n ),\n ],\n )\n def test_neg_numeric(self, df, expected):\n tm.assert_frame_equal(-df, expected)\n tm.assert_series_equal(-df["a"], expected["a"])\n\n @pytest.mark.parametrize(\n "df, expected",\n [\n (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),\n ([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),\n ],\n )\n def test_neg_object(self, df, expected):\n # GH#21380\n df = pd.DataFrame({"a": df})\n expected = pd.DataFrame({"a": expected})\n tm.assert_frame_equal(-df, expected)\n tm.assert_series_equal(-df["a"], expected["a"])\n\n @pytest.mark.parametrize(\n "df",\n [\n pd.DataFrame({"a": ["a", "b"]}),\n pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}),\n ],\n )\n def test_neg_raises(self, df, using_infer_string):\n msg = (\n "bad operand type for unary -: 'str'|"\n r"bad operand type for unary -: 'DatetimeArray'|"\n "unary '-' not supported for dtype"\n )\n with pytest.raises(TypeError, match=msg):\n (-df)\n with pytest.raises(TypeError, match=msg):\n (-df["a"])\n\n def test_invert(self, float_frame):\n df = float_frame\n\n tm.assert_frame_equal(-(df < 0), ~(df < 0))\n\n def test_invert_mixed(self):\n shape = (10, 5)\n df = pd.concat(\n [\n pd.DataFrame(np.zeros(shape, dtype="bool")),\n pd.DataFrame(np.zeros(shape, dtype=int)),\n ],\n axis=1,\n ignore_index=True,\n )\n result = ~df\n expected = pd.concat(\n [\n pd.DataFrame(np.ones(shape, dtype="bool")),\n 
pd.DataFrame(-np.ones(shape, dtype=int)),\n ],\n axis=1,\n ignore_index=True,\n )\n tm.assert_frame_equal(result, expected)\n\n def test_invert_empty_not_input(self):\n # GH#51032\n df = pd.DataFrame()\n result = ~df\n tm.assert_frame_equal(df, result)\n assert df is not result\n\n @pytest.mark.parametrize(\n "df",\n [\n pd.DataFrame({"a": [-1, 1]}),\n pd.DataFrame({"a": [False, True]}),\n pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),\n ],\n )\n def test_pos_numeric(self, df):\n # GH#16073\n tm.assert_frame_equal(+df, df)\n tm.assert_series_equal(+df["a"], df["a"])\n\n @pytest.mark.parametrize(\n "df",\n [\n pd.DataFrame({"a": np.array([-1, 2], dtype=object)}),\n pd.DataFrame({"a": [Decimal("-1.0"), Decimal("2.0")]}),\n ],\n )\n def test_pos_object(self, df):\n # GH#21380\n tm.assert_frame_equal(+df, df)\n tm.assert_series_equal(+df["a"], df["a"])\n\n @pytest.mark.parametrize(\n "df",\n [\n pytest.param(\n pd.DataFrame({"a": ["a", "b"]}),\n # filterwarnings removable once min numpy version is 1.25\n marks=[\n pytest.mark.filterwarnings("ignore:Applying:DeprecationWarning")\n ],\n ),\n ],\n )\n def test_pos_object_raises(self, df):\n # GH#21380\n if np_version_gte1p25:\n with pytest.raises(\n TypeError, match=r"^bad operand type for unary \+: \'str\'$"\n ):\n tm.assert_frame_equal(+df, df)\n else:\n tm.assert_series_equal(+df["a"], df["a"])\n\n @pytest.mark.parametrize(\n "df", [pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})]\n )\n def test_pos_raises(self, df):\n msg = r"bad operand type for unary \+: 'DatetimeArray'"\n with pytest.raises(TypeError, match=msg):\n (+df)\n with pytest.raises(TypeError, match=msg):\n (+df["a"])\n\n def test_unary_nullable(self):\n df = pd.DataFrame(\n {\n "a": pd.array([1, -2, 3, pd.NA], dtype="Int64"),\n "b": pd.array([4.0, -5.0, 6.0, pd.NA], dtype="Float32"),\n "c": pd.array([True, False, False, pd.NA], dtype="boolean"),\n # include numpy bool to make sure bool-vs-boolean behavior\n # is consistent in 
non-NA locations\n "d": np.array([True, False, False, True]),\n }\n )\n\n result = +df\n res_ufunc = np.positive(df)\n expected = df\n # TODO: assert that we have copies?\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(res_ufunc, expected)\n\n result = -df\n res_ufunc = np.negative(df)\n expected = pd.DataFrame(\n {\n "a": pd.array([-1, 2, -3, pd.NA], dtype="Int64"),\n "b": pd.array([-4.0, 5.0, -6.0, pd.NA], dtype="Float32"),\n "c": pd.array([False, True, True, pd.NA], dtype="boolean"),\n "d": np.array([False, True, True, False]),\n }\n )\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(res_ufunc, expected)\n\n result = abs(df)\n res_ufunc = np.abs(df)\n expected = pd.DataFrame(\n {\n "a": pd.array([1, 2, 3, pd.NA], dtype="Int64"),\n "b": pd.array([4.0, 5.0, 6.0, pd.NA], dtype="Float32"),\n "c": pd.array([True, False, False, pd.NA], dtype="boolean"),\n "d": np.array([True, False, False, True]),\n }\n )\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(res_ufunc, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\test_unary.py
test_unary.py
Python
6,287
0.95
0.092308
0.057143
react-lib
882
2024-07-16T04:48:54.942580
BSD-3-Clause
true
156266ba67d05f35e3d04cf14af156d7
import pytest\n\nfrom pandas.core.frame import DataFrame\n\n\n@pytest.fixture\ndef dataframe():\n return DataFrame({"a": [1, 2], "b": [3, 4]})\n\n\nclass TestDataFrameValidate:\n """Tests for error handling related to data types of method arguments."""\n\n @pytest.mark.parametrize(\n "func",\n [\n "query",\n "eval",\n "set_index",\n "reset_index",\n "dropna",\n "drop_duplicates",\n "sort_values",\n ],\n )\n @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0])\n def test_validate_bool_args(self, dataframe, func, inplace):\n msg = 'For argument "inplace" expected type bool'\n kwargs = {"inplace": inplace}\n\n if func == "query":\n kwargs["expr"] = "a > b"\n elif func == "eval":\n kwargs["expr"] = "a + b"\n elif func == "set_index":\n kwargs["keys"] = ["a"]\n elif func == "sort_values":\n kwargs["by"] = ["a"]\n\n with pytest.raises(ValueError, match=msg):\n getattr(dataframe, func)(**kwargs)\n
.venv\Lib\site-packages\pandas\tests\frame\test_validate.py
test_validate.py
Python
1,094
0.85
0.121951
0
react-lib
909
2024-03-30T04:56:58.974615
MIT
true
08c46b605dd2c97f1ba201cc6996080a
from collections import OrderedDict\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n RangeIndex,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestFromDict:\n # Note: these tests are specific to the from_dict method, not for\n # passing dictionaries to DataFrame.__init__\n\n def test_constructor_list_of_odicts(self):\n data = [\n OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),\n OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),\n OrderedDict([["a", 1.5], ["d", 6]]),\n OrderedDict(),\n OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),\n OrderedDict([["b", 3], ["c", 4], ["d", 6]]),\n ]\n\n result = DataFrame(data)\n expected = DataFrame.from_dict(\n dict(zip(range(len(data)), data)), orient="index"\n )\n tm.assert_frame_equal(result, expected.reindex(result.index))\n\n def test_constructor_single_row(self):\n data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]\n\n result = DataFrame(data)\n expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(\n result.index\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.xfail(using_string_dtype(), reason="columns inferring logic broken")\n def test_constructor_list_of_series(self):\n data = [\n OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),\n OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),\n ]\n sdict = OrderedDict(zip(["x", "y"], data))\n idx = Index(["a", "b", "c"])\n\n # all named\n data2 = [\n Series([1.5, 3, 4], idx, dtype="O", name="x"),\n Series([1.5, 3, 6], idx, name="y"),\n ]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient="index")\n tm.assert_frame_equal(result, expected)\n\n # some unnamed\n data2 = [\n Series([1.5, 3, 4], idx, dtype="O", name="x"),\n Series([1.5, 3, 6], idx),\n ]\n result = DataFrame(data2)\n\n sdict = OrderedDict(zip(["x", "Unnamed 0"], data))\n expected = DataFrame.from_dict(sdict, orient="index")\n 
tm.assert_frame_equal(result, expected)\n\n # none named\n data = [\n OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),\n OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),\n OrderedDict([["a", 1.5], ["d", 6]]),\n OrderedDict(),\n OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),\n OrderedDict([["b", 3], ["c", 4], ["d", 6]]),\n ]\n data = [Series(d) for d in data]\n\n result = DataFrame(data)\n sdict = OrderedDict(zip(range(len(data)), data))\n expected = DataFrame.from_dict(sdict, orient="index")\n tm.assert_frame_equal(result, expected.reindex(result.index))\n\n result2 = DataFrame(data, index=np.arange(6, dtype=np.int64))\n tm.assert_frame_equal(result, result2)\n\n result = DataFrame([Series(dtype=object)])\n expected = DataFrame(index=[0])\n tm.assert_frame_equal(result, expected)\n\n data = [\n OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),\n OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),\n ]\n sdict = OrderedDict(zip(range(len(data)), data))\n\n idx = Index(["a", "b", "c"])\n data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient="index")\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_orient(self, float_string_frame):\n data_dict = float_string_frame.T._series\n recons = DataFrame.from_dict(data_dict, orient="index")\n expected = float_string_frame.reindex(index=recons.index)\n tm.assert_frame_equal(recons, expected)\n\n # dict of sequence\n a = {"hi": [32, 3, 3], "there": [3, 5, 3]}\n rs = DataFrame.from_dict(a, orient="index")\n xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))\n tm.assert_frame_equal(rs, xp)\n\n def test_constructor_from_ordered_dict(self):\n # GH#8425\n a = OrderedDict(\n [\n ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),\n ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),\n ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),\n ]\n )\n expected = DataFrame.from_dict(a, 
orient="columns").T\n result = DataFrame.from_dict(a, orient="index")\n tm.assert_frame_equal(result, expected)\n\n def test_from_dict_columns_parameter(self):\n # GH#18529\n # Test new columns parameter for from_dict that was added to make\n # from_items(..., orient='index', columns=[...]) easier to replicate\n result = DataFrame.from_dict(\n OrderedDict([("A", [1, 2]), ("B", [4, 5])]),\n orient="index",\n columns=["one", "two"],\n )\n expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"])\n tm.assert_frame_equal(result, expected)\n\n msg = "cannot use columns parameter with orient='columns'"\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict(\n {"A": [1, 2], "B": [4, 5]},\n orient="columns",\n columns=["one", "two"],\n )\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"])\n\n @pytest.mark.parametrize(\n "data_dict, orient, expected",\n [\n ({}, "index", RangeIndex(0)),\n (\n [{("a",): 1}, {("a",): 2}],\n "columns",\n Index([("a",)], tupleize_cols=False),\n ),\n (\n [OrderedDict([(("a",), 1), (("b",), 2)])],\n "columns",\n Index([("a",), ("b",)], tupleize_cols=False),\n ),\n ([{("a", "b"): 1}], "columns", Index([("a", "b")], tupleize_cols=False)),\n ],\n )\n def test_constructor_from_dict_tuples(self, data_dict, orient, expected):\n # GH#16769\n df = DataFrame.from_dict(data_dict, orient)\n result = df.columns\n tm.assert_index_equal(result, expected)\n\n def test_frame_dict_constructor_empty_series(self):\n s1 = Series(\n [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)])\n )\n s2 = Series(\n [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)])\n )\n s3 = Series(dtype=object)\n\n # it works!\n DataFrame({"foo": s1, "bar": s2, "baz": s3})\n DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2})\n\n def test_from_dict_scalars_requires_index(self):\n msg = "If using all scalar values, you must pass an index"\n 
with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))\n\n def test_from_dict_orient_invalid(self):\n msg = (\n "Expected 'index', 'columns' or 'tight' for orient parameter. "\n "Got 'abc' instead"\n )\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict({"foo": 1, "baz": 3, "bar": 2}, orient="abc")\n\n def test_from_dict_order_with_single_column(self):\n data = {\n "alpha": {\n "value2": 123,\n "value1": 532,\n "animal": 222,\n "plant": False,\n "name": "test",\n }\n }\n result = DataFrame.from_dict(\n data,\n orient="columns",\n )\n expected = DataFrame(\n [[123], [532], [222], [False], ["test"]],\n index=["value2", "value1", "animal", "plant", "name"],\n columns=["alpha"],\n )\n tm.assert_frame_equal(result, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\constructors\test_from_dict.py
test_from_dict.py
Python
8,121
0.95
0.070796
0.061224
node-utils
797
2023-11-28T17:28:53.782684
MIT
true
80923df6d72f1dd4d240f0b687875862
from collections.abc import Iterator\nfrom datetime import datetime\nfrom decimal import Decimal\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas.compat import is_platform_little_endian\n\nfrom pandas import (\n CategoricalIndex,\n DataFrame,\n Index,\n Interval,\n RangeIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestFromRecords:\n def test_from_records_dt64tz_frame(self):\n # GH#51162 don't lose tz when calling from_records with DataFrame input\n dti = date_range("2016-01-01", periods=10, tz="US/Pacific")\n df = DataFrame({i: dti for i in range(4)})\n with tm.assert_produces_warning(FutureWarning):\n res = DataFrame.from_records(df)\n tm.assert_frame_equal(res, df)\n\n def test_from_records_with_datetimes(self):\n # this may fail on certain platforms because of a numpy issue\n # related GH#6140\n if not is_platform_little_endian():\n pytest.skip("known failure of test on non-little endian")\n\n # construction with a null in a recarray\n # GH#6140\n expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})\n\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [("EXPIRY", "<M8[ns]")]\n\n recarray = np.rec.fromarrays(arrdata, dtype=dtypes)\n\n result = DataFrame.from_records(recarray)\n tm.assert_frame_equal(result, expected)\n\n # coercion should work too\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [("EXPIRY", "<M8[m]")]\n recarray = np.rec.fromarrays(arrdata, dtype=dtypes)\n result = DataFrame.from_records(recarray)\n # we get the closest supported unit, "s"\n expected["EXPIRY"] = expected["EXPIRY"].astype("M8[s]")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.xfail(using_string_dtype(), reason="dtype checking logic doesn't work")\n def test_from_records_sequencelike(self):\n df = DataFrame(\n {\n "A": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float64\n ),\n "A1": np.array(\n 
np.random.default_rng(2).standard_normal(6), dtype=np.float64\n ),\n "B": np.array(np.arange(6), dtype=np.int64),\n "C": ["foo"] * 6,\n "D": np.array([True, False] * 3, dtype=bool),\n "E": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float32\n ),\n "E1": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float32\n ),\n "F": np.array(np.arange(6), dtype=np.int32),\n }\n )\n\n # this is actually tricky to create the recordlike arrays and\n # have the dtypes be intact\n blocks = df._to_dict_of_blocks()\n tuples = []\n columns = []\n dtypes = []\n for dtype, b in blocks.items():\n columns.extend(b.columns)\n dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])\n for i in range(len(df.index)):\n tup = []\n for _, b in blocks.items():\n tup.extend(b.iloc[i].values)\n tuples.append(tuple(tup))\n\n recarray = np.array(tuples, dtype=dtypes).view(np.rec.recarray)\n recarray2 = df.to_records()\n lists = [list(x) for x in tuples]\n\n # tuples (lose the dtype info)\n result = DataFrame.from_records(tuples, columns=columns).reindex(\n columns=df.columns\n )\n\n # created recarray and with to_records recarray (have dtype info)\n result2 = DataFrame.from_records(recarray, columns=columns).reindex(\n columns=df.columns\n )\n result3 = DataFrame.from_records(recarray2, columns=columns).reindex(\n columns=df.columns\n )\n\n # list of tuples (no dtype info)\n result4 = DataFrame.from_records(lists, columns=columns).reindex(\n columns=df.columns\n )\n\n tm.assert_frame_equal(result, df, check_dtype=False)\n tm.assert_frame_equal(result2, df)\n tm.assert_frame_equal(result3, df)\n tm.assert_frame_equal(result4, df, check_dtype=False)\n\n # tuples is in the order of the columns\n result = DataFrame.from_records(tuples)\n tm.assert_index_equal(result.columns, RangeIndex(8))\n\n # test exclude parameter & we are casting the results here (as we don't\n # have dtype info to recover)\n columns_to_test = [columns.index("C"), 
columns.index("E1")]\n\n exclude = list(set(range(8)) - set(columns_to_test))\n result = DataFrame.from_records(tuples, exclude=exclude)\n result.columns = [columns[i] for i in sorted(columns_to_test)]\n tm.assert_series_equal(result["C"], df["C"])\n tm.assert_series_equal(result["E1"], df["E1"])\n\n def test_from_records_sequencelike_empty(self):\n # empty case\n result = DataFrame.from_records([], columns=["foo", "bar", "baz"])\n assert len(result) == 0\n tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))\n\n result = DataFrame.from_records([])\n assert len(result) == 0\n assert len(result.columns) == 0\n\n def test_from_records_dictlike(self):\n # test the dict methods\n df = DataFrame(\n {\n "A": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float64\n ),\n "A1": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float64\n ),\n "B": np.array(np.arange(6), dtype=np.int64),\n "C": ["foo"] * 6,\n "D": np.array([True, False] * 3, dtype=bool),\n "E": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float32\n ),\n "E1": np.array(\n np.random.default_rng(2).standard_normal(6), dtype=np.float32\n ),\n "F": np.array(np.arange(6), dtype=np.int32),\n }\n )\n\n # columns is in a different order here than the actual items iterated\n # from the dict\n blocks = df._to_dict_of_blocks()\n columns = []\n for b in blocks.values():\n columns.extend(b.columns)\n\n asdict = dict(df.items())\n asdict2 = {x: y.values for x, y in df.items()}\n\n # dict of series & dict of ndarrays (have dtype info)\n results = []\n results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))\n results.append(\n DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)\n )\n results.append(\n DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)\n )\n\n for r in results:\n tm.assert_frame_equal(r, df)\n\n def test_from_records_with_index_data(self):\n df = DataFrame(\n 
np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"]\n )\n\n data = np.random.default_rng(2).standard_normal(10)\n with tm.assert_produces_warning(FutureWarning):\n df1 = DataFrame.from_records(df, index=data)\n tm.assert_index_equal(df1.index, Index(data))\n\n def test_from_records_bad_index_column(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"]\n )\n\n # should pass\n with tm.assert_produces_warning(FutureWarning):\n df1 = DataFrame.from_records(df, index=["C"])\n tm.assert_index_equal(df1.index, Index(df.C))\n\n with tm.assert_produces_warning(FutureWarning):\n df1 = DataFrame.from_records(df, index="C")\n tm.assert_index_equal(df1.index, Index(df.C))\n\n # should fail\n msg = "|".join(\n [\n r"'None of \[2\] are in the columns'",\n ]\n )\n with pytest.raises(KeyError, match=msg):\n with tm.assert_produces_warning(FutureWarning):\n DataFrame.from_records(df, index=[2])\n with pytest.raises(KeyError, match=msg):\n with tm.assert_produces_warning(FutureWarning):\n DataFrame.from_records(df, index=2)\n\n def test_from_records_non_tuple(self):\n class Record:\n def __init__(self, *args) -> None:\n self.args = args\n\n def __getitem__(self, i):\n return self.args[i]\n\n def __iter__(self) -> Iterator:\n return iter(self.args)\n\n recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]\n tups = [tuple(rec) for rec in recs]\n\n result = DataFrame.from_records(recs)\n expected = DataFrame.from_records(tups)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_len0_with_columns(self):\n # GH#2633\n result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])\n expected = Index(["bar"])\n\n assert len(result) == 0\n assert result.index.name == "foo"\n tm.assert_index_equal(result.columns, expected)\n\n def test_from_records_series_list_dict(self):\n # GH#27358\n expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T\n data = Series([[{"a": 1, "b": 2}], [{"a": 
3, "b": 4}]])\n result = DataFrame.from_records(data)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_series_categorical_index(self):\n # GH#32805\n index = CategoricalIndex(\n [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]\n )\n series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)\n frame = DataFrame.from_records(series_of_dicts, index=index)\n expected = DataFrame(\n {"a": [1, 2, np.nan], "b": [np.nan, np.nan, 3]}, index=index\n )\n tm.assert_frame_equal(frame, expected)\n\n def test_frame_from_records_utc(self):\n rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}\n\n # it works\n DataFrame.from_records([rec], index="begin_time")\n\n def test_from_records_to_records(self):\n # from numpy documentation\n arr = np.zeros((2,), dtype=("i4,f4,S10"))\n arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]\n\n DataFrame.from_records(arr)\n\n index = Index(np.arange(len(arr))[::-1])\n indexed_frame = DataFrame.from_records(arr, index=index)\n tm.assert_index_equal(indexed_frame.index, index)\n\n # without names, it should go to last ditch\n arr2 = np.zeros((2, 3))\n tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))\n\n # wrong length\n msg = "|".join(\n [\n r"Length of values \(2\) does not match length of index \(1\)",\n ]\n )\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_records(arr, index=index[:-1])\n\n indexed_frame = DataFrame.from_records(arr, index="f1")\n\n # what to do?\n records = indexed_frame.to_records()\n assert len(records.dtype.names) == 3\n\n records = indexed_frame.to_records(index=False)\n assert len(records.dtype.names) == 2\n assert "index" not in records.dtype.names\n\n def test_from_records_nones(self):\n tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]\n\n df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])\n assert np.isnan(df["c"][0])\n\n def test_from_records_iterator(self):\n arr = np.array(\n [(1.0, 1.0, 2, 2), (3.0, 
3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],\n dtype=[\n ("x", np.float64),\n ("u", np.float32),\n ("y", np.int64),\n ("z", np.int32),\n ],\n )\n df = DataFrame.from_records(iter(arr), nrows=2)\n xp = DataFrame(\n {\n "x": np.array([1.0, 3.0], dtype=np.float64),\n "u": np.array([1.0, 3.0], dtype=np.float32),\n "y": np.array([2, 4], dtype=np.int64),\n "z": np.array([2, 4], dtype=np.int32),\n }\n )\n tm.assert_frame_equal(df.reindex_like(xp), xp)\n\n # no dtypes specified here, so just compare with the default\n arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]\n df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)\n tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)\n\n def test_from_records_tuples_generator(self):\n def tuple_generator(length):\n for i in range(length):\n letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"\n yield (i, letters[i % len(letters)], i / length)\n\n columns_names = ["Integer", "String", "Float"]\n columns = [\n [i[j] for i in tuple_generator(10)] for j in range(len(columns_names))\n ]\n data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = tuple_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_lists_generator(self):\n def list_generator(length):\n for i in range(length):\n letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"\n yield [i, letters[i % len(letters)], i / length]\n\n columns_names = ["Integer", "String", "Float"]\n columns = [\n [i[j] for i in list_generator(10)] for j in range(len(columns_names))\n ]\n data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = list_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_columns_not_modified(self):\n 
tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]\n\n columns = ["a", "b", "c"]\n original_columns = list(columns)\n\n DataFrame.from_records(tuples, columns=columns, index="a")\n\n assert columns == original_columns\n\n def test_from_records_decimal(self):\n tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]\n\n df = DataFrame.from_records(tuples, columns=["a"])\n assert df["a"].dtype == object\n\n df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)\n assert df["a"].dtype == np.float64\n assert np.isnan(df["a"].values[-1])\n\n def test_from_records_duplicates(self):\n result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])\n\n expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_set_index_name(self):\n def create_dict(order_id):\n return {\n "order_id": order_id,\n "quantity": np.random.default_rng(2).integers(1, 10),\n "price": np.random.default_rng(2).integers(1, 10),\n }\n\n documents = [create_dict(i) for i in range(10)]\n # demo missing data\n documents.append({"order_id": 10, "quantity": 5})\n\n result = DataFrame.from_records(documents, index="order_id")\n assert result.index.name == "order_id"\n\n # MultiIndex\n result = DataFrame.from_records(documents, index=["order_id", "quantity"])\n assert result.index.names == ("order_id", "quantity")\n\n def test_from_records_misc_brokenness(self):\n # GH#2179\n\n data = {1: ["foo"], 2: ["bar"]}\n\n result = DataFrame.from_records(data, columns=["a", "b"])\n exp = DataFrame(data, columns=["a", "b"])\n tm.assert_frame_equal(result, exp)\n\n # overlap in index/index_names\n\n data = {"a": [1, 2, 3], "b": [4, 5, 6]}\n\n result = DataFrame.from_records(data, index=["a", "b", "c"])\n exp = DataFrame(data, index=["a", "b", "c"])\n tm.assert_frame_equal(result, exp)\n\n def test_from_records_misc_brokenness2(self):\n # GH#2623\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n 
rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj\n result = DataFrame.from_records(rows, columns=["date", "test"])\n expected = DataFrame(\n {"date": [row[0] for row in rows], "test": [row[1] for row in rows]}\n )\n tm.assert_frame_equal(result, expected)\n assert result.dtypes["test"] == np.dtype(object)\n\n def test_from_records_misc_brokenness3(self):\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 1])\n result = DataFrame.from_records(rows, columns=["date", "test"])\n expected = DataFrame(\n {"date": [row[0] for row in rows], "test": [row[1] for row in rows]}\n )\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_empty(self):\n # GH#3562\n result = DataFrame.from_records([], columns=["a", "b", "c"])\n expected = DataFrame(columns=["a", "b", "c"])\n tm.assert_frame_equal(result, expected)\n\n result = DataFrame.from_records([], columns=["a", "b", "b"])\n expected = DataFrame(columns=["a", "b", "b"])\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_empty_with_nonempty_fields_gh3682(self):\n a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])\n df = DataFrame.from_records(a, index="id")\n\n ex_index = Index([1], name="id")\n expected = DataFrame({"value": [2]}, index=ex_index, columns=["value"])\n tm.assert_frame_equal(df, expected)\n\n b = a[:0]\n df2 = DataFrame.from_records(b, index="id")\n tm.assert_frame_equal(df2, df.iloc[:0])\n\n def test_from_records_empty2(self):\n # GH#42456\n dtype = [("prop", int)]\n shape = (0, len(dtype))\n arr = np.empty(shape, dtype=dtype)\n\n result = DataFrame.from_records(arr)\n expected = DataFrame({"prop": np.array([], dtype=int)})\n tm.assert_frame_equal(result, expected)\n\n alt = DataFrame(arr)\n tm.assert_frame_equal(alt, expected)\n
.venv\Lib\site-packages\pandas\tests\frame\constructors\test_from_records.py
test_from_records.py
Python
18,570
0.95
0.115308
0.09291
python-kit
836
2023-11-03T13:01:35.033386
MIT
true
724304a64c3f0cc0efb7fd958eb46b38
\n\n
.venv\Lib\site-packages\pandas\tests\frame\constructors\__pycache__\test_from_dict.cpython-313.pyc
test_from_dict.cpython-313.pyc
Other
11,493
0.95
0.007634
0.007937
vue-tools
725
2023-11-18T12:10:51.232898
BSD-3-Clause
true
7cbf298dfbdae51e5d5cf6bf550f730c
\n\n
.venv\Lib\site-packages\pandas\tests\frame\constructors\__pycache__\test_from_records.cpython-313.pyc
test_from_records.cpython-313.pyc
Other
31,006
0.8
0
0.044776
vue-tools
382
2023-12-30T08:14:15.370526
MIT
true
a8bbec799e130bade1a2a0323d2f272a
\n\n
.venv\Lib\site-packages\pandas\tests\frame\constructors\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
206
0.7
0
0
python-kit
175
2024-06-19T02:02:32.645743
Apache-2.0
true
231cdce7e18ff016d63c65a031340471
"""\nTests for values coercion in setitem-like operations on DataFrame.\n\nFor the most part, these should be multi-column DataFrames, otherwise\nwe would share the tests with Series.\n"""\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n MultiIndex,\n NaT,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameSetitemCoercion:\n @pytest.mark.parametrize("consolidate", [True, False])\n def test_loc_setitem_multiindex_columns(self, consolidate):\n # GH#18415 Setting values in a single column preserves dtype,\n # while setting them in multiple columns did unwanted cast.\n\n # Note that A here has 2 blocks, below we do the same thing\n # with a consolidated frame.\n A = DataFrame(np.zeros((6, 5), dtype=np.float32))\n A = pd.concat([A, A], axis=1, keys=[1, 2])\n if consolidate:\n A = A._consolidate()\n\n A.loc[2:3, (1, slice(2, 3))] = np.ones((2, 2), dtype=np.float32)\n assert (A.dtypes == np.float32).all()\n\n A.loc[0:5, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32)\n\n assert (A.dtypes == np.float32).all()\n\n A.loc[:, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32)\n assert (A.dtypes == np.float32).all()\n\n # TODO: i think this isn't about MultiIndex and could be done with iloc?\n\n\ndef test_37477():\n # fixed by GH#45121\n orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})\n expected = DataFrame({"A": [1, 2, 3], "B": [3, 1.2, 5]})\n\n df = orig.copy()\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.at[1, "B"] = 1.2\n tm.assert_frame_equal(df, expected)\n\n df = orig.copy()\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.loc[1, "B"] = 1.2\n tm.assert_frame_equal(df, expected)\n\n df = orig.copy()\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.iat[1, 1] = 1.2\n tm.assert_frame_equal(df, 
expected)\n\n df = orig.copy()\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.iloc[1, 1] = 1.2\n tm.assert_frame_equal(df, expected)\n\n\ndef test_6942(indexer_al):\n # check that the .at __setitem__ after setting "Live" actually sets the data\n start = Timestamp("2014-04-01")\n t1 = Timestamp("2014-04-23 12:42:38.883082")\n t2 = Timestamp("2014-04-24 01:33:30.040039")\n\n dti = date_range(start, periods=1)\n orig = DataFrame(index=dti, columns=["timenow", "Live"])\n\n df = orig.copy()\n indexer_al(df)[start, "timenow"] = t1\n\n df["Live"] = True\n\n df.at[start, "timenow"] = t2\n assert df.iloc[0, 0] == t2\n\n\ndef test_26395(indexer_al):\n # .at case fixed by GH#45121 (best guess)\n df = DataFrame(index=["A", "B", "C"])\n df["D"] = 0\n\n indexer_al(df)["C", "D"] = 2\n expected = DataFrame(\n {"D": [0, 0, 2]},\n index=["A", "B", "C"],\n columns=pd.Index(["D"], dtype=object),\n dtype=np.int64,\n )\n tm.assert_frame_equal(df, expected)\n\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n indexer_al(df)["C", "D"] = 44.5\n expected = DataFrame(\n {"D": [0, 0, 44.5]},\n index=["A", "B", "C"],\n columns=pd.Index(["D"], dtype=object),\n dtype=np.float64,\n )\n tm.assert_frame_equal(df, expected)\n\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n indexer_al(df)["C", "D"] = "hello"\n expected = DataFrame(\n {"D": [0, 0, "hello"]},\n index=["A", "B", "C"],\n columns=pd.Index(["D"], dtype=object),\n dtype=object,\n )\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.xfail(reason="unwanted upcast")\ndef test_15231():\n df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])\n df.loc[2] = Series({"a": 5, "b": 6})\n assert (df.dtypes == np.int64).all()\n\n df.loc[3] = Series({"a": 7})\n\n # df["a"] doesn't have any NaNs, should not have been cast\n exp_dtypes = Series([np.int64, np.float64], dtype=object, 
index=["a", "b"])\n tm.assert_series_equal(df.dtypes, exp_dtypes)\n\n\ndef test_iloc_setitem_unnecesssary_float_upcasting():\n # GH#12255\n df = DataFrame(\n {\n 0: np.array([1, 3], dtype=np.float32),\n 1: np.array([2, 4], dtype=np.float32),\n 2: ["a", "b"],\n }\n )\n orig = df.copy()\n\n values = df[0].values.reshape(2, 1)\n df.iloc[:, 0:1] = values\n\n tm.assert_frame_equal(df, orig)\n\n\n@pytest.mark.xfail(reason="unwanted casting to dt64")\ndef test_12499():\n # TODO: OP in GH#12499 used np.datetim64("NaT") instead of pd.NaT,\n # which has consequences for the expected df["two"] (though i think at\n # the time it might not have because of a separate bug). See if it makes\n # a difference which one we use here.\n ts = Timestamp("2016-03-01 03:13:22.98986", tz="UTC")\n\n data = [{"one": 0, "two": ts}]\n orig = DataFrame(data)\n df = orig.copy()\n df.loc[1] = [np.nan, NaT]\n\n expected = DataFrame(\n {"one": [0, np.nan], "two": Series([ts, NaT], dtype="datetime64[ns, UTC]")}\n )\n tm.assert_frame_equal(df, expected)\n\n data = [{"one": 0, "two": ts}]\n df = orig.copy()\n df.loc[1, :] = [np.nan, NaT]\n tm.assert_frame_equal(df, expected)\n\n\ndef test_20476():\n mi = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])\n df = DataFrame(-1, index=range(3), columns=mi)\n filler = DataFrame([[1, 2, 3.0]] * 3, index=range(3), columns=["a", "b", "c"])\n df["A"] = filler\n\n expected = DataFrame(\n {\n 0: [1, 1, 1],\n 1: [2, 2, 2],\n 2: [3.0, 3.0, 3.0],\n 3: [-1, -1, -1],\n 4: [-1, -1, -1],\n 5: [-1, -1, -1],\n }\n )\n expected.columns = mi\n exp_dtypes = Series(\n [np.dtype(np.int64)] * 2 + [np.dtype(np.float64)] + [np.dtype(np.int64)] * 3,\n index=mi,\n )\n tm.assert_series_equal(df.dtypes, exp_dtypes)\n
.venv\Lib\site-packages\pandas\tests\frame\indexing\test_coercion.py
test_coercion.py
Python
6,225
0.95
0.065421
0.081871
vue-tools
727
2023-11-22T18:41:51.511452
GPL-3.0
true
3a19c665559c439f578ebd7d4031ea28
import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n)\n\n\nclass TestDataFrameDelItem:\n def test_delitem(self, float_frame):\n del float_frame["A"]\n assert "A" not in float_frame\n\n def test_delitem_multiindex(self):\n midx = MultiIndex.from_product([["A", "B"], [1, 2]])\n df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), columns=midx)\n assert len(df.columns) == 4\n assert ("A",) in df.columns\n assert "A" in df.columns\n\n result = df["A"]\n assert isinstance(result, DataFrame)\n del df["A"]\n\n assert len(df.columns) == 2\n\n # A still in the levels, BUT get a KeyError if trying\n # to delete\n assert ("A",) not in df.columns\n with pytest.raises(KeyError, match=re.escape("('A',)")):\n del df[("A",)]\n\n # behavior of dropped/deleted MultiIndex levels changed from\n # GH 2770 to GH 19027: MultiIndex no longer '.__contains__'\n # levels which are dropped/deleted\n assert "A" not in df.columns\n with pytest.raises(KeyError, match=re.escape("('A',)")):\n del df["A"]\n\n def test_delitem_corner(self, float_frame):\n f = float_frame.copy()\n del f["D"]\n assert len(f.columns) == 3\n with pytest.raises(KeyError, match=r"^'D'$"):\n del f["D"]\n del f["B"]\n assert len(f.columns) == 2\n\n def test_delitem_col_still_multiindex(self):\n arrays = [["a", "b", "c", "top"], ["", "", "", "OD"], ["", "", "", "wx"]]\n\n tuples = sorted(zip(*arrays))\n index = MultiIndex.from_tuples(tuples)\n\n df = DataFrame(np.random.default_rng(2).standard_normal((3, 4)), columns=index)\n del df[("a", "", "")]\n assert isinstance(df.columns, MultiIndex)\n
.venv\Lib\site-packages\pandas\tests\frame\indexing\test_delitem.py
test_delitem.py
Python
1,832
0.95
0.1
0.106383
vue-tools
344
2024-09-26T18:38:53.056029
GPL-3.0
true
910e584b9ea00245276b93fa3afde001
import pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\n\nclass TestGet:\n def test_get(self, float_frame):\n b = float_frame.get("B")\n tm.assert_series_equal(b, float_frame["B"])\n\n assert float_frame.get("foo") is None\n tm.assert_series_equal(\n float_frame.get("foo", float_frame["B"]), float_frame["B"]\n )\n\n @pytest.mark.parametrize(\n "df",\n [\n DataFrame(),\n DataFrame(columns=list("AB")),\n DataFrame(columns=list("AB"), index=range(3)),\n ],\n )\n def test_get_none(self, df):\n # see gh-5652\n assert df.get(None) is None\n
.venv\Lib\site-packages\pandas\tests\frame\indexing\test_get.py
test_get.py
Python
662
0.95
0.111111
0.045455
python-kit
664
2023-11-26T08:03:06.386565
BSD-3-Clause
true
ddf2ef6e64e923003d07fc16db9615f4
import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalDtype,\n CategoricalIndex,\n DataFrame,\n DateOffset,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n concat,\n date_range,\n get_dummies,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import SparseArray\n\n\nclass TestGetitem:\n def test_getitem_unused_level_raises(self):\n # GH#20410\n mi = MultiIndex(\n levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],\n codes=[[1, 0], [1, 0]],\n )\n df = DataFrame(-1, index=range(3), columns=mi)\n\n with pytest.raises(KeyError, match="notevenone"):\n df["notevenone"]\n\n def test_getitem_periodindex(self):\n rng = period_range("1/1/2000", periods=5)\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)), columns=rng)\n\n ts = df[rng[0]]\n tm.assert_series_equal(ts, df.iloc[:, 0])\n\n ts = df["1/1/2000"]\n tm.assert_series_equal(ts, df.iloc[:, 0])\n\n def test_getitem_list_of_labels_categoricalindex_cols(self):\n # GH#16115\n cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])\n\n expected = DataFrame([[1, 0], [0, 1]], dtype="bool", index=[0, 1], columns=cats)\n dummies = get_dummies(cats)\n result = dummies[list(dummies.columns)]\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_sparse_column_return_type_and_dtype(self):\n # https://github.com/pandas-dev/pandas/issues/23559\n data = SparseArray([0, 1])\n df = DataFrame({"A": data})\n expected = Series(data, name="A")\n result = df["A"]\n tm.assert_series_equal(result, expected)\n\n # Also check iloc and loc while we're here\n result = df.iloc[:, 0]\n tm.assert_series_equal(result, expected)\n\n result = df.loc[:, "A"]\n tm.assert_series_equal(result, expected)\n\n def test_getitem_string_columns(self):\n # GH#46185\n df = DataFrame([[1, 2]], columns=Index(["A", "B"], dtype="string"))\n result = df.A\n expected = df["A"]\n tm.assert_series_equal(result, expected)\n\n\nclass 
TestGetitemListLike:\n def test_getitem_list_missing_key(self):\n # GH#13822, incorrect error string with non-unique columns when missing\n # column is accessed\n df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})\n df.columns = ["x", "x", "z"]\n\n # Check that we get the correct value in the KeyError\n with pytest.raises(KeyError, match=r"\['y'\] not in index"):\n df[["x", "y", "z"]]\n\n def test_getitem_list_duplicates(self):\n # GH#1943\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 4)), columns=list("AABC")\n )\n df.columns.name = "foo"\n\n result = df[["B", "C"]]\n assert result.columns.name == "foo"\n\n expected = df.iloc[:, 2:]\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_dupe_cols(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])\n msg = "\"None of [Index(['baf'], dtype="\n with pytest.raises(KeyError, match=re.escape(msg)):\n df[["baf"]]\n\n @pytest.mark.parametrize(\n "idx_type",\n [\n list,\n iter,\n Index,\n set,\n lambda keys: dict(zip(keys, range(len(keys)))),\n lambda keys: dict(zip(keys, range(len(keys)))).keys(),\n ],\n ids=["list", "iter", "Index", "set", "dict", "dict_keys"],\n )\n @pytest.mark.parametrize("levels", [1, 2])\n def test_getitem_listlike(self, idx_type, levels, float_frame):\n # GH#21294\n\n if levels == 1:\n frame, missing = float_frame, "food"\n else:\n # MultiIndex columns\n frame = DataFrame(\n np.random.default_rng(2).standard_normal((8, 3)),\n columns=Index(\n [("foo", "bar"), ("baz", "qux"), ("peek", "aboo")],\n name=("sth", "sth2"),\n ),\n )\n missing = ("good", "food")\n\n keys = [frame.columns[1], frame.columns[0]]\n idx = idx_type(keys)\n idx_check = list(idx_type(keys))\n\n if isinstance(idx, (set, dict)):\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n frame[idx]\n\n return\n else:\n result = frame[idx]\n\n expected = frame.loc[:, idx_check]\n expected.columns.names = frame.columns.names\n\n tm.assert_frame_equal(result, 
expected)\n\n idx = idx_type(keys + [missing])\n with pytest.raises(KeyError, match="not in index"):\n frame[idx]\n\n def test_getitem_iloc_generator(self):\n # GH#39614\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n indexer = (x for x in [1, 2])\n result = df.iloc[indexer]\n expected = DataFrame({"a": [2, 3], "b": [5, 6]}, index=[1, 2])\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_iloc_two_dimensional_generator(self):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n indexer = (x for x in [1, 2])\n result = df.iloc[indexer, 1]\n expected = Series([5, 6], name="b", index=[1, 2])\n tm.assert_series_equal(result, expected)\n\n def test_getitem_iloc_dateoffset_days(self):\n # GH 46671\n df = DataFrame(\n list(range(10)),\n index=date_range("01-01-2022", periods=10, freq=DateOffset(days=1)),\n )\n result = df.loc["2022-01-01":"2022-01-03"]\n expected = DataFrame(\n [0, 1, 2],\n index=DatetimeIndex(\n ["2022-01-01", "2022-01-02", "2022-01-03"],\n dtype="datetime64[ns]",\n freq=DateOffset(days=1),\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(\n list(range(10)),\n index=date_range(\n "01-01-2022", periods=10, freq=DateOffset(days=1, hours=2)\n ),\n )\n result = df.loc["2022-01-01":"2022-01-03"]\n expected = DataFrame(\n [0, 1, 2],\n index=DatetimeIndex(\n ["2022-01-01 00:00:00", "2022-01-02 02:00:00", "2022-01-03 04:00:00"],\n dtype="datetime64[ns]",\n freq=DateOffset(days=1, hours=2),\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(\n list(range(10)),\n index=date_range("01-01-2022", periods=10, freq=DateOffset(minutes=3)),\n )\n result = df.loc["2022-01-01":"2022-01-03"]\n tm.assert_frame_equal(result, df)\n\n\nclass TestGetitemCallable:\n def test_getitem_callable(self, float_frame):\n # GH#12533\n result = float_frame[lambda x: "A"]\n expected = float_frame.loc[:, "A"]\n tm.assert_series_equal(result, expected)\n\n result = float_frame[lambda x: ["A", "B"]]\n expected = float_frame.loc[:, ["A", 
"B"]]\n tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])\n\n df = float_frame[:3]\n result = df[lambda x: [True, False, True]]\n expected = float_frame.iloc[[0, 2], :]\n tm.assert_frame_equal(result, expected)\n\n def test_loc_multiindex_columns_one_level(self):\n # GH#29749\n df = DataFrame([[1, 2]], columns=[["a", "b"]])\n expected = DataFrame([1], columns=[["a"]])\n\n result = df["a"]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[:, "a"]\n tm.assert_frame_equal(result, expected)\n\n\nclass TestGetitemBooleanMask:\n def test_getitem_bool_mask_categorical_index(self):\n df3 = DataFrame(\n {\n "A": np.arange(6, dtype="int64"),\n },\n index=CategoricalIndex(\n [1, 1, 2, 1, 3, 2],\n dtype=CategoricalDtype([3, 2, 1], ordered=True),\n name="B",\n ),\n )\n df4 = DataFrame(\n {\n "A": np.arange(6, dtype="int64"),\n },\n index=CategoricalIndex(\n [1, 1, 2, 1, 3, 2],\n dtype=CategoricalDtype([3, 2, 1], ordered=False),\n name="B",\n ),\n )\n\n result = df3[df3.index == "a"]\n expected = df3.iloc[[]]\n tm.assert_frame_equal(result, expected)\n\n result = df4[df4.index == "a"]\n expected = df4.iloc[[]]\n tm.assert_frame_equal(result, expected)\n\n result = df3[df3.index == 1]\n expected = df3.iloc[[0, 1, 3]]\n tm.assert_frame_equal(result, expected)\n\n result = df4[df4.index == 1]\n expected = df4.iloc[[0, 1, 3]]\n tm.assert_frame_equal(result, expected)\n\n # since we have an ordered categorical\n\n # CategoricalIndex([1, 1, 2, 1, 3, 2],\n # categories=[3, 2, 1],\n # ordered=True,\n # name='B')\n result = df3[df3.index < 2]\n expected = df3.iloc[[4]]\n tm.assert_frame_equal(result, expected)\n\n result = df3[df3.index > 1]\n expected = df3.iloc[[]]\n tm.assert_frame_equal(result, expected)\n\n # unordered\n # cannot be compared\n\n # CategoricalIndex([1, 1, 2, 1, 3, 2],\n # categories=[3, 2, 1],\n # ordered=False,\n # name='B')\n msg = "Unordered Categoricals can only compare equality or not"\n with pytest.raises(TypeError, match=msg):\n 
df4[df4.index < 2]\n with pytest.raises(TypeError, match=msg):\n df4[df4.index > 1]\n\n @pytest.mark.parametrize(\n "data1,data2,expected_data",\n (\n (\n [[1, 2], [3, 4]],\n [[0.5, 6], [7, 8]],\n [[np.nan, 3.0], [np.nan, 4.0], [np.nan, 7.0], [6.0, 8.0]],\n ),\n (\n [[1, 2], [3, 4]],\n [[5, 6], [7, 8]],\n [[np.nan, 3.0], [np.nan, 4.0], [5, 7], [6, 8]],\n ),\n ),\n )\n def test_getitem_bool_mask_duplicate_columns_mixed_dtypes(\n self,\n data1,\n data2,\n expected_data,\n ):\n # GH#31954\n\n df1 = DataFrame(np.array(data1))\n df2 = DataFrame(np.array(data2))\n df = concat([df1, df2], axis=1)\n\n result = df[df > 2]\n\n exdict = {i: np.array(col) for i, col in enumerate(expected_data)}\n expected = DataFrame(exdict).rename(columns={2: 0, 3: 1})\n tm.assert_frame_equal(result, expected)\n\n @pytest.fixture\n def df_dup_cols(self):\n dups = ["A", "A", "C", "D"]\n df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")\n return df\n\n def test_getitem_boolean_frame_unaligned_with_duplicate_columns(self, df_dup_cols):\n # `df.A > 6` is a DataFrame with a different shape from df\n\n # boolean with the duplicate raises\n df = df_dup_cols\n msg = "cannot reindex on an axis with duplicate labels"\n with pytest.raises(ValueError, match=msg):\n df[df.A > 6]\n\n def test_getitem_boolean_series_with_duplicate_columns(self, df_dup_cols):\n # boolean indexing\n # GH#4879\n df = DataFrame(\n np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"\n )\n expected = df[df.C > 6]\n expected.columns = df_dup_cols.columns\n\n df = df_dup_cols\n result = df[df.C > 6]\n\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_boolean_frame_with_duplicate_columns(self, df_dup_cols):\n # where\n df = DataFrame(\n np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"\n )\n # `df > 6` is a DataFrame with the same shape+alignment as df\n expected = df[df > 6]\n expected.columns = df_dup_cols.columns\n\n df = df_dup_cols\n result = 
df[df > 6]\n\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_empty_frame_with_boolean(self):\n # Test for issue GH#11859\n\n df = DataFrame()\n df2 = df[df > 0]\n tm.assert_frame_equal(df, df2)\n\n def test_getitem_returns_view_when_column_is_unique_in_df(\n self, using_copy_on_write, warn_copy_on_write\n ):\n # GH#45316\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])\n df_orig = df.copy()\n view = df["b"]\n with tm.assert_cow_warning(warn_copy_on_write):\n view.loc[:] = 100\n if using_copy_on_write:\n expected = df_orig\n else:\n expected = DataFrame([[1, 2, 100], [4, 5, 100]], columns=["a", "a", "b"])\n tm.assert_frame_equal(df, expected)\n\n def test_getitem_frozenset_unique_in_column(self):\n # GH#41062\n df = DataFrame([[1, 2, 3, 4]], columns=[frozenset(["KEY"]), "B", "C", "C"])\n result = df[frozenset(["KEY"])]\n expected = Series([1], name=frozenset(["KEY"]))\n tm.assert_series_equal(result, expected)\n\n\nclass TestGetitemSlice:\n def test_getitem_slice_float64(self, frame_or_series):\n values = np.arange(10.0, 50.0, 2)\n index = Index(values)\n\n start, end = values[[5, 15]]\n\n data = np.random.default_rng(2).standard_normal((20, 3))\n if frame_or_series is not DataFrame:\n data = data[:, 0]\n\n obj = frame_or_series(data, index=index)\n\n result = obj[start:end]\n expected = obj.iloc[5:16]\n tm.assert_equal(result, expected)\n\n result = obj.loc[start:end]\n tm.assert_equal(result, expected)\n\n def test_getitem_datetime_slice(self):\n # GH#43223\n df = DataFrame(\n {"a": 0},\n index=DatetimeIndex(\n [\n "11.01.2011 22:00",\n "11.01.2011 23:00",\n "12.01.2011 00:00",\n "2011-01-13 00:00",\n ]\n ),\n )\n with pytest.raises(\n KeyError, match="Value based partial slicing on non-monotonic"\n ):\n df["2011-01-01":"2011-11-01"]\n\n def test_getitem_slice_same_dim_only_one_axis(self):\n # GH#54622\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 8)))\n result = df.iloc[(slice(None, None, 2),)]\n assert 
result.shape == (5, 8)\n expected = df.iloc[slice(None, None, 2), slice(None)]\n tm.assert_frame_equal(result, expected)\n\n\nclass TestGetitemDeprecatedIndexers:\n @pytest.mark.parametrize("key", [{"a", "b"}, {"a": "a"}])\n def test_getitem_dict_and_set_deprecated(self, key):\n # GH#42825 enforced in 2.0\n df = DataFrame(\n [[1, 2], [3, 4]], columns=MultiIndex.from_tuples([("a", 1), ("b", 2)])\n )\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n df[key]\n
.venv\Lib\site-packages\pandas\tests\frame\indexing\test_getitem.py
test_getitem.py
Python
15,002
0.95
0.088983
0.09949
awesome-app
80
2025-04-14T21:21:07.612373
BSD-3-Clause
true
b96a4403695ed70e0a1dd491663dd34a
import pytest\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n)\n\n\nclass TestGetValue:\n def test_get_set_value_no_partial_indexing(self):\n # partial w/ MultiIndex raise exception\n index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])\n df = DataFrame(index=index, columns=range(4))\n with pytest.raises(KeyError, match=r"^0$"):\n df._get_value(0, 1)\n\n def test_get_value(self, float_frame):\n for idx in float_frame.index:\n for col in float_frame.columns:\n result = float_frame._get_value(idx, col)\n expected = float_frame[col][idx]\n assert result == expected\n
.venv\Lib\site-packages\pandas\tests\frame\indexing\test_get_value.py
test_get_value.py
Python
679
0.95
0.227273
0.055556
node-utils
155
2023-10-19T14:18:49.468941
GPL-3.0
true
805dd8ace77192aaa40ae056dbce91d1
from collections import namedtuple\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom decimal import Decimal\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import iNaT\nfrom pandas.errors import (\n InvalidIndexError,\n PerformanceWarning,\n SettingWithCopyError,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.common import is_integer\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n isna,\n notna,\n to_datetime,\n)\nimport pandas._testing as tm\n\n# We pass through a TypeError raised by numpy\n_slice_msg = "slice indices must be integers or None or have an __index__ method"\n\n\nclass TestDataFrameIndexing:\n def test_getitem(self, float_frame):\n # Slicing\n sl = float_frame[:20]\n assert len(sl.index) == 20\n\n # Column access\n for _, series in sl.items():\n assert len(series.index) == 20\n tm.assert_index_equal(series.index, sl.index)\n\n for key, _ in float_frame._series.items():\n assert float_frame[key] is not None\n\n assert "random" not in float_frame\n with pytest.raises(KeyError, match="random"):\n float_frame["random"]\n\n def test_getitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype):\n # GH51053\n dtype = any_numeric_dtype\n idx = Index([1, 0, 1], dtype=dtype)\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx)\n result = df[1]\n expected = DataFrame([[1, 3], [4, 6]], columns=Index([1, 1], dtype=dtype))\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n def test_getitem2(self, float_frame):\n df = float_frame.copy()\n df["$10"] = np.random.default_rng(2).standard_normal(len(df))\n\n ad = np.random.default_rng(2).standard_normal(len(df))\n df["@awesome_domain"] = ad\n\n with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):\n df.__getitem__('df["$10"]')\n\n res = df["@awesome_domain"]\n 
tm.assert_numpy_array_equal(ad, res.values)\n\n def test_setitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype):\n # GH51053\n dtype = any_numeric_dtype\n idx = Index([1, 0, 1], dtype=dtype)\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx)\n df[1] = 10\n expected = DataFrame([[10, 2, 10], [10, 5, 10]], columns=idx)\n tm.assert_frame_equal(df, expected, check_exact=True)\n\n def test_setitem_list(self, float_frame):\n float_frame["E"] = "foo"\n data = float_frame[["A", "B"]]\n float_frame[["B", "A"]] = data\n\n tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)\n tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)\n\n msg = "Columns must be same length as key"\n with pytest.raises(ValueError, match=msg):\n data[["A"]] = float_frame[["A", "B"]]\n newcolumndata = range(len(data.index) - 1)\n msg = (\n rf"Length of values \({len(newcolumndata)}\) "\n rf"does not match length of index \({len(data)}\)"\n )\n with pytest.raises(ValueError, match=msg):\n data["A"] = newcolumndata\n\n def test_setitem_list2(self):\n df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=int)\n df.loc[1, ["tt1", "tt2"]] = [1, 2]\n\n result = df.loc[df.index[1], ["tt1", "tt2"]]\n expected = Series([1, 2], df.columns, dtype=int, name=1)\n tm.assert_series_equal(result, expected)\n\n df["tt1"] = df["tt2"] = "0"\n df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]\n result = df.loc[df.index[1], ["tt1", "tt2"]]\n expected = Series(["1", "2"], df.columns, name=1)\n tm.assert_series_equal(result, expected)\n\n def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):\n # boolean indexing\n d = datetime_frame.index[10]\n indexer = datetime_frame.index > d\n indexer_obj = indexer.astype(object)\n\n subindex = datetime_frame.index[indexer]\n subframe = datetime_frame[indexer]\n\n tm.assert_index_equal(subindex, subframe.index)\n with pytest.raises(ValueError, match="Item wrong length"):\n 
datetime_frame[indexer[:-1]]\n\n subframe_obj = datetime_frame[indexer_obj]\n tm.assert_frame_equal(subframe_obj, subframe)\n\n with pytest.raises(ValueError, match="Boolean array expected"):\n datetime_frame[datetime_frame]\n\n # test that Series work\n indexer_obj = Series(indexer_obj, datetime_frame.index)\n\n subframe_obj = datetime_frame[indexer_obj]\n tm.assert_frame_equal(subframe_obj, subframe)\n\n # test that Series indexers reindex\n # we are producing a warning that since the passed boolean\n # key is not the same as the given index, we will reindex\n # not sure this is really necessary\n with tm.assert_produces_warning(UserWarning):\n indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])\n subframe_obj = datetime_frame[indexer_obj]\n tm.assert_frame_equal(subframe_obj, subframe)\n\n # test df[df > 0]\n for df in [\n datetime_frame,\n mixed_float_frame,\n mixed_int_frame,\n ]:\n data = df._get_numeric_data()\n bif = df[df > 0]\n bifw = DataFrame(\n {c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},\n index=data.index,\n columns=data.columns,\n )\n\n # add back other columns to compare\n for c in df.columns:\n if c not in bifw:\n bifw[c] = df[c]\n bifw = bifw.reindex(columns=df.columns)\n\n tm.assert_frame_equal(bif, bifw, check_dtype=False)\n for c in df.columns:\n if bif[c].dtype != bifw[c].dtype:\n assert bif[c].dtype == df[c].dtype\n\n def test_getitem_boolean_casting(self, datetime_frame):\n # don't upcast if we don't need to\n df = datetime_frame.copy()\n df["E"] = 1\n df["E"] = df["E"].astype("int32")\n df["E1"] = df["E"].copy()\n df["F"] = 1\n df["F"] = df["F"].astype("int64")\n df["F1"] = df["F"].copy()\n\n casted = df[df > 0]\n result = casted.dtypes\n expected = Series(\n [np.dtype("float64")] * 4\n + [np.dtype("int32")] * 2\n + [np.dtype("int64")] * 2,\n index=["A", "B", "C", "D", "E", "E1", "F", "F1"],\n )\n tm.assert_series_equal(result, expected)\n\n # int block splitting\n df.loc[df.index[1:3], ["E1", "F1"]] = 0\n 
casted = df[df > 0]\n result = casted.dtypes\n expected = Series(\n [np.dtype("float64")] * 4\n + [np.dtype("int32")]\n + [np.dtype("float64")]\n + [np.dtype("int64")]\n + [np.dtype("float64")],\n index=["A", "B", "C", "D", "E", "E1", "F", "F1"],\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "lst", [[True, False, True], [True, True, True], [False, False, False]]\n )\n def test_getitem_boolean_list(self, lst):\n df = DataFrame(np.arange(12).reshape(3, 4))\n result = df[lst]\n expected = df.loc[df.index[lst]]\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_boolean_iadd(self):\n arr = np.random.default_rng(2).standard_normal((5, 5))\n\n df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])\n\n df[df < 0] += 1\n arr[arr < 0] += 1\n\n tm.assert_almost_equal(df.values, arr)\n\n def test_boolean_index_empty_corner(self):\n # #2096\n blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))\n\n # both of these should succeed trivially\n k = np.array([], bool)\n\n blah[k]\n blah[k] = 0\n\n def test_getitem_ix_mixed_integer(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 3)),\n index=[1, 10, "C", "E"],\n columns=[1, 2, 3],\n )\n\n result = df.iloc[:-1]\n expected = df.loc[df.index[:-1]]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[[1, 10]]\n expected = df.loc[Index([1, 10])]\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_ix_mixed_integer2(self):\n # 11320\n df = DataFrame(\n {\n "rna": (1.5, 2.2, 3.2, 4.5),\n -1000: [11, 21, 36, 40],\n 0: [10, 22, 43, 34],\n 1000: [0, 10, 20, 30],\n },\n columns=["rna", -1000, 0, 1000],\n )\n result = df[[1000]]\n expected = df.iloc[:, [3]]\n tm.assert_frame_equal(result, expected)\n result = df[[-1000]]\n expected = df.iloc[:, [1]]\n tm.assert_frame_equal(result, expected)\n\n def test_getattr(self, float_frame):\n tm.assert_series_equal(float_frame.A, float_frame["A"])\n msg = "'DataFrame' object has no 
attribute 'NONEXISTENT_NAME'"\n with pytest.raises(AttributeError, match=msg):\n float_frame.NONEXISTENT_NAME\n\n def test_setattr_column(self):\n df = DataFrame({"foobar": 1}, index=range(10))\n\n df.foobar = 5\n assert (df.foobar == 5).all()\n\n def test_setitem(\n self, float_frame, using_copy_on_write, warn_copy_on_write, using_infer_string\n ):\n # not sure what else to do here\n series = float_frame["A"][::2]\n float_frame["col5"] = series\n assert "col5" in float_frame\n\n assert len(series) == 15\n assert len(float_frame) == 30\n\n exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))\n exp = Series(exp, index=float_frame.index, name="col5")\n tm.assert_series_equal(float_frame["col5"], exp)\n\n series = float_frame["A"]\n float_frame["col6"] = series\n tm.assert_series_equal(series, float_frame["col6"], check_names=False)\n\n # set ndarray\n arr = np.random.default_rng(2).standard_normal(len(float_frame))\n float_frame["col9"] = arr\n assert (float_frame["col9"] == arr).all()\n\n float_frame["col7"] = 5\n assert (float_frame["col7"] == 5).all()\n\n float_frame["col0"] = 3.14\n assert (float_frame["col0"] == 3.14).all()\n\n float_frame["col8"] = "foo"\n assert (float_frame["col8"] == "foo").all()\n\n # this is partially a view (e.g. 
some blocks are view)\n # so raise/warn\n smaller = float_frame[:2]\n\n msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"\n if using_copy_on_write or warn_copy_on_write:\n # With CoW, adding a new column doesn't raise a warning\n smaller["col10"] = ["1", "2"]\n else:\n with pytest.raises(SettingWithCopyError, match=msg):\n smaller["col10"] = ["1", "2"]\n\n if using_infer_string:\n assert smaller["col10"].dtype == "str"\n else:\n assert smaller["col10"].dtype == np.object_\n assert (smaller["col10"] == ["1", "2"]).all()\n\n def test_setitem2(self):\n # dtype changing GH4204\n df = DataFrame([[0, 0]])\n df.iloc[0] = np.nan\n expected = DataFrame([[np.nan, np.nan]])\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame([[0, 0]])\n df.loc[0] = np.nan\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_boolean(self, float_frame):\n df = float_frame.copy()\n values = float_frame.values.copy()\n\n df[df["A"] > 0] = 4\n values[values[:, 0] > 0] = 4\n tm.assert_almost_equal(df.values, values)\n\n # test that column reindexing works\n series = df["A"] == 4\n series = series.reindex(df.index[::-1])\n df[series] = 1\n values[values[:, 0] == 4] = 1\n tm.assert_almost_equal(df.values, values)\n\n df[df > 0] = 5\n values[values > 0] = 5\n tm.assert_almost_equal(df.values, values)\n\n df[df == 5] = 0\n values[values == 5] = 0\n tm.assert_almost_equal(df.values, values)\n\n # a df that needs alignment first\n df[df[:-1] < 0] = 2\n np.putmask(values[:-1], values[:-1] < 0, 2)\n tm.assert_almost_equal(df.values, values)\n\n # indexed with same shape but rows-reversed df\n df[df[::-1] == 2] = 3\n values[values == 2] = 3\n tm.assert_almost_equal(df.values, values)\n\n msg = "Must pass DataFrame or 2-d ndarray with boolean values only"\n with pytest.raises(TypeError, match=msg):\n df[df * 0] = 2\n\n # index with DataFrame\n df_orig = df.copy()\n mask = df > np.abs(df)\n df[df > np.abs(df)] = np.nan\n values = df_orig.values.copy()\n 
values[mask.values] = np.nan\n expected = DataFrame(values, index=df_orig.index, columns=df_orig.columns)\n tm.assert_frame_equal(df, expected)\n\n # set from DataFrame\n df[df > np.abs(df)] = df * 2\n np.putmask(values, mask.values, df.values * 2)\n expected = DataFrame(values, index=df_orig.index, columns=df_orig.columns)\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_cast(self, float_frame):\n float_frame["D"] = float_frame["D"].astype("i8")\n assert float_frame["D"].dtype == np.int64\n\n # #669, should not cast?\n # this is now set to int64, which means a replacement of the column to\n # the value dtype (and nothing to do with the existing dtype)\n float_frame["B"] = 0\n assert float_frame["B"].dtype == np.int64\n\n # cast if pass array of course\n float_frame["B"] = np.arange(len(float_frame))\n assert issubclass(float_frame["B"].dtype.type, np.integer)\n\n float_frame["foo"] = "bar"\n float_frame["foo"] = 0\n assert float_frame["foo"].dtype == np.int64\n\n float_frame["foo"] = "bar"\n float_frame["foo"] = 2.5\n assert float_frame["foo"].dtype == np.float64\n\n float_frame["something"] = 0\n assert float_frame["something"].dtype == np.int64\n float_frame["something"] = 2\n assert float_frame["something"].dtype == np.int64\n float_frame["something"] = 2.5\n assert float_frame["something"].dtype == np.float64\n\n def test_setitem_corner(self, float_frame, using_infer_string):\n # corner case\n df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))\n del df["B"]\n df["B"] = [1.0, 2.0, 3.0]\n assert "B" in df\n assert len(df.columns) == 2\n\n df["A"] = "beginning"\n df["E"] = "foo"\n df["D"] = "bar"\n df[datetime.now()] = "date"\n df[datetime.now()] = 5.0\n\n # what to do when empty frame with index\n dm = DataFrame(index=float_frame.index)\n dm["A"] = "foo"\n dm["B"] = "bar"\n assert len(dm.columns) == 2\n assert dm.values.dtype == np.object_\n\n # upcast\n dm["C"] = 1\n assert dm["C"].dtype == np.int64\n\n dm["E"] = 1.0\n 
assert dm["E"].dtype == np.float64\n\n # set existing column\n dm["A"] = "bar"\n assert "bar" == dm["A"].iloc[0]\n\n dm = DataFrame(index=np.arange(3))\n dm["A"] = 1\n dm["foo"] = "bar"\n del dm["foo"]\n dm["foo"] = "bar"\n if using_infer_string:\n assert dm["foo"].dtype == "str"\n else:\n assert dm["foo"].dtype == np.object_\n\n dm["coercible"] = ["1", "2", "3"]\n if using_infer_string:\n assert dm["coercible"].dtype == "str"\n else:\n assert dm["coercible"].dtype == np.object_\n\n def test_setitem_corner2(self):\n data = {\n "title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,\n "cruft": np.random.default_rng(2).random(20),\n }\n\n df = DataFrame(data)\n ix = df[df["title"] == "bar"].index\n\n df.loc[ix, ["title"]] = "foobar"\n df.loc[ix, ["cruft"]] = 0\n\n assert df.loc[1, "title"] == "foobar"\n assert df.loc[1, "cruft"] == 0\n\n def test_setitem_ambig(self, using_infer_string):\n # Difficulties with mixed-type data\n # Created as float type\n dm = DataFrame(index=range(3), columns=range(3))\n\n coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))\n uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))\n\n dm[0] = np.ones(3)\n assert len(dm.columns) == 3\n\n dm[1] = coercable_series\n assert len(dm.columns) == 3\n\n dm[2] = uncoercable_series\n assert len(dm.columns) == 3\n if using_infer_string:\n assert dm[2].dtype == "str"\n else:\n assert dm[2].dtype == np.object_\n\n def test_setitem_None(self, float_frame):\n # GH #766\n float_frame[None] = float_frame["A"]\n tm.assert_series_equal(\n float_frame.iloc[:, -1], float_frame["A"], check_names=False\n )\n tm.assert_series_equal(\n float_frame.loc[:, None], float_frame["A"], check_names=False\n )\n tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)\n\n def test_loc_setitem_boolean_mask_allfalse(self):\n # GH 9596\n df = DataFrame(\n {"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}\n )\n\n result = df.copy()\n 
result.loc[result.b.isna(), "a"] = result.a.copy()\n tm.assert_frame_equal(result, df)\n\n def test_getitem_fancy_slice_integers_step(self):\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))\n\n # this is OK\n df.iloc[:8:2]\n df.iloc[:8:2] = np.nan\n assert isna(df.iloc[:8:2]).values.all()\n\n def test_getitem_setitem_integer_slice_keyerrors(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 5)), index=range(0, 20, 2)\n )\n\n # this is OK\n cp = df.copy()\n cp.iloc[4:10] = 0\n assert (cp.iloc[4:10] == 0).values.all()\n\n # so is this\n cp = df.copy()\n cp.iloc[3:11] = 0\n assert (cp.iloc[3:11] == 0).values.all()\n\n result = df.iloc[2:6]\n result2 = df.loc[3:11]\n expected = df.reindex([4, 6, 8, 10])\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]\n with pytest.raises(KeyError, match=r"^3$"):\n df2.loc[3:11]\n with pytest.raises(KeyError, match=r"^3$"):\n df2.loc[3:11] = 0\n\n @td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view\n def test_fancy_getitem_slice_mixed(\n self, float_frame, float_string_frame, using_copy_on_write, warn_copy_on_write\n ):\n sliced = float_string_frame.iloc[:, -3:]\n assert sliced["D"].dtype == np.float64\n\n # get view with single block\n # setting it triggers setting with copy\n original = float_frame.copy()\n sliced = float_frame.iloc[:, -3:]\n\n assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)\n\n with tm.assert_cow_warning(warn_copy_on_write):\n sliced.loc[:, "C"] = 4.0\n if not using_copy_on_write:\n assert (float_frame["C"] == 4).all()\n\n # with the enforcement of GH#45333 in 2.0, this remains a view\n np.shares_memory(sliced["C"]._values, float_frame["C"]._values)\n else:\n tm.assert_frame_equal(float_frame, original)\n\n def test_getitem_setitem_non_ix_labels(self):\n df = DataFrame(range(20), 
index=date_range("2020-01-01", periods=20))\n\n start, end = df.index[[5, 10]]\n\n result = df.loc[start:end]\n result2 = df[start:end]\n expected = df[5:11]\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n result = df.copy()\n result.loc[start:end] = 0\n result2 = df.copy()\n result2[start:end] = 0\n expected = df.copy()\n expected[5:11] = 0\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n def test_ix_multi_take(self):\n df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))\n rs = df.loc[df.index == 0, :]\n xp = df.reindex([0])\n tm.assert_frame_equal(rs, xp)\n\n # GH#1321\n df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))\n rs = df.loc[df.index == 0, df.columns == 1]\n xp = df.reindex(index=[0], columns=[1])\n tm.assert_frame_equal(rs, xp)\n\n def test_getitem_fancy_scalar(self, float_frame):\n f = float_frame\n ix = f.loc\n\n # individual value\n for col in f.columns:\n ts = f[col]\n for idx in f.index[::5]:\n assert ix[idx, col] == ts[idx]\n\n @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values\n def test_setitem_fancy_scalar(self, float_frame):\n f = float_frame\n expected = float_frame.copy()\n ix = f.loc\n\n # individual value\n for j, col in enumerate(f.columns):\n f[col]\n for idx in f.index[::5]:\n i = f.index.get_loc(idx)\n val = np.random.default_rng(2).standard_normal()\n expected.iloc[i, j] = val\n\n ix[idx, col] = val\n tm.assert_frame_equal(f, expected)\n\n def test_getitem_fancy_boolean(self, float_frame):\n f = float_frame\n ix = f.loc\n\n expected = f.reindex(columns=["B", "D"])\n result = ix[:, [False, True, False, True]]\n tm.assert_frame_equal(result, expected)\n\n expected = f.reindex(index=f.index[5:10], columns=["B", "D"])\n result = ix[f.index[5:10], [False, True, False, True]]\n tm.assert_frame_equal(result, expected)\n\n boolvec = f.index > f.index[7]\n expected = f.reindex(index=f.index[boolvec])\n 
result = ix[boolvec]\n tm.assert_frame_equal(result, expected)\n result = ix[boolvec, :]\n tm.assert_frame_equal(result, expected)\n\n result = ix[boolvec, f.columns[2:]]\n expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])\n tm.assert_frame_equal(result, expected)\n\n @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values\n def test_setitem_fancy_boolean(self, float_frame):\n # from 2d, set with booleans\n frame = float_frame.copy()\n expected = float_frame.copy()\n values = expected.values.copy()\n\n mask = frame["A"] > 0\n frame.loc[mask] = 0.0\n values[mask.values] = 0.0\n expected = DataFrame(values, index=expected.index, columns=expected.columns)\n tm.assert_frame_equal(frame, expected)\n\n frame = float_frame.copy()\n expected = float_frame.copy()\n values = expected.values.copy()\n frame.loc[mask, ["A", "B"]] = 0.0\n values[mask.values, :2] = 0.0\n expected = DataFrame(values, index=expected.index, columns=expected.columns)\n tm.assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_ints(self, float_frame):\n result = float_frame.iloc[[1, 4, 7]]\n expected = float_frame.loc[float_frame.index[[1, 4, 7]]]\n tm.assert_frame_equal(result, expected)\n\n result = float_frame.iloc[:, [2, 0, 1]]\n expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_setitem_boolean_misaligned(self, float_frame):\n # boolean index misaligned labels\n mask = float_frame["A"][::-1] > 1\n\n result = float_frame.loc[mask]\n expected = float_frame.loc[mask[::-1]]\n tm.assert_frame_equal(result, expected)\n\n cp = float_frame.copy()\n expected = float_frame.copy()\n cp.loc[mask] = 0\n expected.loc[mask] = 0\n tm.assert_frame_equal(cp, expected)\n\n def test_getitem_setitem_boolean_multi(self):\n df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))\n\n # get\n k1 = np.array([True, False, True])\n k2 = np.array([False, True])\n result = df.loc[k1, k2]\n 
expected = df.loc[[0, 2], [1]]\n tm.assert_frame_equal(result, expected)\n\n expected = df.copy()\n df.loc[np.array([True, False, True]), np.array([False, True])] = 5\n expected.loc[[0, 2], [1]] = 5\n tm.assert_frame_equal(df, expected)\n\n def test_getitem_setitem_float_labels(self, using_array_manager):\n index = Index([1.5, 2, 3, 4, 5])\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)), index=index)\n\n result = df.loc[1.5:4]\n expected = df.reindex([1.5, 2, 3, 4])\n tm.assert_frame_equal(result, expected)\n assert len(result) == 4\n\n result = df.loc[4:5]\n expected = df.reindex([4, 5]) # reindex with int\n tm.assert_frame_equal(result, expected, check_index_type=False)\n assert len(result) == 2\n\n result = df.loc[4:5]\n expected = df.reindex([4.0, 5.0]) # reindex with float\n tm.assert_frame_equal(result, expected)\n assert len(result) == 2\n\n # loc_float changes this to work properly\n result = df.loc[1:2]\n expected = df.iloc[0:2]\n tm.assert_frame_equal(result, expected)\n\n expected = df.iloc[0:2]\n msg = r"The behavior of obj\[i:j\] with a float-dtype index"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df[1:2]\n tm.assert_frame_equal(result, expected)\n\n # #2727\n index = Index([1.0, 2.5, 3.5, 4.5, 5.0])\n df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)), index=index)\n\n # positional slicing only via iloc!\n msg = (\n "cannot do positional indexing on Index with "\n r"these indexers \[1.0\] of type float"\n )\n with pytest.raises(TypeError, match=msg):\n df.iloc[1.0:5]\n\n result = df.iloc[4:5]\n expected = df.reindex([5.0])\n tm.assert_frame_equal(result, expected)\n assert len(result) == 1\n\n cp = df.copy()\n\n with pytest.raises(TypeError, match=_slice_msg):\n cp.iloc[1.0:5] = 0\n\n with pytest.raises(TypeError, match=msg):\n result = cp.iloc[1.0:5] == 0\n\n assert result.values.all()\n assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()\n\n cp = df.copy()\n cp.iloc[4:5] = 0\n assert 
(cp.iloc[4:5] == 0).values.all()\n assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()\n\n # float slicing\n result = df.loc[1.0:5]\n expected = df\n tm.assert_frame_equal(result, expected)\n assert len(result) == 5\n\n result = df.loc[1.1:5]\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n tm.assert_frame_equal(result, expected)\n assert len(result) == 4\n\n result = df.loc[4.51:5]\n expected = df.reindex([5.0])\n tm.assert_frame_equal(result, expected)\n assert len(result) == 1\n\n result = df.loc[1.0:5.0]\n expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])\n tm.assert_frame_equal(result, expected)\n assert len(result) == 5\n\n cp = df.copy()\n cp.loc[1.0:5.0] = 0\n result = cp.loc[1.0:5.0]\n assert (result == 0).values.all()\n\n def test_setitem_single_column_mixed_datetime(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((5, 3)),\n index=["a", "b", "c", "d", "e"],\n columns=["foo", "bar", "baz"],\n )\n\n df["timestamp"] = Timestamp("20010102")\n\n # check our dtypes\n result = df.dtypes\n expected = Series(\n [np.dtype("float64")] * 3 + [np.dtype("datetime64[s]")],\n index=["foo", "bar", "baz", "timestamp"],\n )\n tm.assert_series_equal(result, expected)\n\n # GH#16674 iNaT is treated as an integer when given by the user\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.loc["b", "timestamp"] = iNaT\n assert not isna(df.loc["b", "timestamp"])\n assert df["timestamp"].dtype == np.object_\n assert df.loc["b", "timestamp"] == iNaT\n\n # allow this syntax (as of GH#3216)\n df.loc["c", "timestamp"] = np.nan\n assert isna(df.loc["c", "timestamp"])\n\n # allow this syntax\n df.loc["d", :] = np.nan\n assert not isna(df.loc["c", :]).all()\n\n def test_setitem_mixed_datetime(self):\n # GH 9336\n expected = DataFrame(\n {\n "a": [0, 0, 0, 0, 13, 14],\n "b": [\n datetime(2012, 1, 1),\n 1,\n "x",\n "y",\n datetime(2013, 1, 1),\n datetime(2014, 1, 1),\n ],\n }\n )\n df = DataFrame(0, columns=list("ab"), 
index=range(6))\n df["b"] = pd.NaT\n df.loc[0, "b"] = datetime(2012, 1, 1)\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.loc[1, "b"] = 1\n df.loc[[2, 3], "b"] = "x", "y"\n A = np.array(\n [\n [13, np.datetime64("2013-01-01T00:00:00")],\n [14, np.datetime64("2014-01-01T00:00:00")],\n ]\n )\n df.loc[[4, 5], ["a", "b"]] = A\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_frame_float(self, float_frame):\n piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]\n float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values\n result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values\n expected = piece.values\n tm.assert_almost_equal(result, expected)\n\n # dtype inference\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)")\n def test_setitem_frame_mixed(self, float_string_frame):\n # GH 3216\n\n # already aligned\n f = float_string_frame.copy()\n piece = DataFrame(\n [[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]\n )\n key = (f.index[slice(None, 2)], ["A", "B"])\n f.loc[key] = piece\n tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)\n\n # dtype inference\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)")\n def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):\n # GH#3216 rows unaligned\n f = float_string_frame.copy()\n piece = DataFrame(\n [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],\n index=list(f.index[0:2]) + ["foo", "bar"],\n columns=["A", "B"],\n )\n key = (f.index[slice(None, 2)], ["A", "B"])\n f.loc[key] = piece\n tm.assert_almost_equal(\n f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]\n )\n\n # dtype inference\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)")\n def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):\n # GH#3216 key is unaligned with values\n f = float_string_frame.copy()\n piece = f.loc[f.index[:2], 
["A"]]\n piece.index = f.index[-2:]\n key = (f.index[slice(-2, None)], ["A", "B"])\n f.loc[key] = piece\n piece["B"] = np.nan\n tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)\n\n def test_setitem_frame_mixed_ndarray(self, float_string_frame):\n # GH#3216 ndarray\n f = float_string_frame.copy()\n piece = float_string_frame.loc[f.index[:2], ["A", "B"]]\n key = (f.index[slice(-2, None)], ["A", "B"])\n f.loc[key] = piece.values\n tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)\n\n def test_setitem_frame_upcast(self):\n # needs upcasting\n df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])\n df2 = df.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5\n expected = df.reindex(columns=["A", "B"])\n expected += 0.5\n expected["C"] = df["C"]\n tm.assert_frame_equal(df2, expected)\n\n def test_setitem_frame_align(self, float_frame):\n piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]\n piece.index = float_frame.index[-2:]\n piece.columns = ["A", "B"]\n float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece\n result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values\n expected = piece.values\n tm.assert_almost_equal(result, expected)\n\n def test_getitem_setitem_ix_duplicates(self):\n # #1201\n df = DataFrame(\n np.random.default_rng(2).standard_normal((5, 3)),\n index=["foo", "foo", "bar", "baz", "bar"],\n )\n\n result = df.loc["foo"]\n expected = df[:2]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc["bar"]\n expected = df.iloc[[2, 4]]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc["baz"]\n expected = df.iloc[3]\n tm.assert_series_equal(result, expected)\n\n def test_getitem_ix_boolean_duplicates_multiple(self):\n # #1201\n df = DataFrame(\n np.random.default_rng(2).standard_normal((5, 3)),\n index=["foo", "foo", "bar", "baz", "bar"],\n )\n\n result = 
df.loc[["bar"]]\n exp = df.iloc[[2, 4]]\n tm.assert_frame_equal(result, exp)\n\n result = df.loc[df[1] > 0]\n exp = df[df[1] > 0]\n tm.assert_frame_equal(result, exp)\n\n result = df.loc[df[0] > 0]\n exp = df[df[0] > 0]\n tm.assert_frame_equal(result, exp)\n\n @pytest.mark.parametrize("bool_value", [True, False])\n def test_getitem_setitem_ix_bool_keyerror(self, bool_value):\n # #2199\n df = DataFrame({"a": [1, 2, 3]})\n message = f"{bool_value}: boolean label can not be used without a boolean index"\n with pytest.raises(KeyError, match=message):\n df.loc[bool_value]\n\n msg = "cannot use a single bool to index into setitem"\n with pytest.raises(KeyError, match=msg):\n df.loc[bool_value] = 0\n\n # TODO: rename? remove?\n def test_single_element_ix_dont_upcast(self, float_frame):\n float_frame["E"] = 1\n assert issubclass(float_frame["E"].dtype.type, (int, np.integer))\n\n result = float_frame.loc[float_frame.index[5], "E"]\n assert is_integer(result)\n\n # GH 11617\n df = DataFrame({"a": [1.23]})\n df["b"] = 666\n\n result = df.loc[0, "b"]\n assert is_integer(result)\n\n expected = Series([666], [0], name="b")\n result = df.loc[[0], "b"]\n tm.assert_series_equal(result, expected)\n\n def test_iloc_callable_tuple_return_value(self):\n # GH53769\n df = DataFrame(np.arange(40).reshape(10, 4), index=range(0, 20, 2))\n msg = "callable with iloc"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.iloc[lambda _: (0,)]\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.iloc[lambda _: (0,)] = 1\n\n def test_iloc_row(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 4)), index=range(0, 20, 2)\n )\n\n result = df.iloc[1]\n exp = df.loc[2]\n tm.assert_series_equal(result, exp)\n\n result = df.iloc[2]\n exp = df.loc[4]\n tm.assert_series_equal(result, exp)\n\n # slice\n result = df.iloc[slice(4, 8)]\n expected = df.loc[8:14]\n tm.assert_frame_equal(result, expected)\n\n # list of integers\n result = df.iloc[[1, 2, 4, 6]]\n 
expected = df.reindex(df.index[[1, 2, 4, 6]])\n tm.assert_frame_equal(result, expected)\n\n def test_iloc_row_slice_view(self, using_copy_on_write, warn_copy_on_write):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 4)), index=range(0, 20, 2)\n )\n original = df.copy()\n\n # verify slice is view\n # setting it makes it raise/warn\n subset = df.iloc[slice(4, 8)]\n\n assert np.shares_memory(df[2], subset[2])\n\n exp_col = original[2].copy()\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.loc[:, 2] = 0.0\n if not using_copy_on_write:\n exp_col._values[4:8] = 0.0\n\n # With the enforcement of GH#45333 in 2.0, this remains a view\n assert np.shares_memory(df[2], subset[2])\n tm.assert_series_equal(df[2], exp_col)\n\n def test_iloc_col(self):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 10)), columns=range(0, 20, 2)\n )\n\n result = df.iloc[:, 1]\n exp = df.loc[:, 2]\n tm.assert_series_equal(result, exp)\n\n result = df.iloc[:, 2]\n exp = df.loc[:, 4]\n tm.assert_series_equal(result, exp)\n\n # slice\n result = df.iloc[:, slice(4, 8)]\n expected = df.loc[:, 8:14]\n tm.assert_frame_equal(result, expected)\n\n # list of integers\n result = df.iloc[:, [1, 2, 4, 6]]\n expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])\n tm.assert_frame_equal(result, expected)\n\n def test_iloc_col_slice_view(\n self, using_array_manager, using_copy_on_write, warn_copy_on_write\n ):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 10)), columns=range(0, 20, 2)\n )\n original = df.copy()\n subset = df.iloc[:, slice(4, 8)]\n\n if not using_array_manager and not using_copy_on_write:\n # verify slice is view\n assert np.shares_memory(df[8]._values, subset[8]._values)\n\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.loc[:, 8] = 0.0\n\n assert (df[8] == 0).all()\n\n # with the enforcement of GH#45333 in 2.0, this remains a view\n assert np.shares_memory(df[8]._values, subset[8]._values)\n else:\n if 
using_copy_on_write:\n # verify slice is view\n assert np.shares_memory(df[8]._values, subset[8]._values)\n subset[8] = 0.0\n # subset changed\n assert (subset[8] == 0).all()\n # but df itself did not change (setitem replaces full column)\n tm.assert_frame_equal(df, original)\n\n def test_loc_duplicates(self):\n # gh-17105\n\n # insert a duplicate element to the index\n trange = date_range(\n start=Timestamp(year=2017, month=1, day=1),\n end=Timestamp(year=2017, month=1, day=5),\n )\n\n trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))\n\n df = DataFrame(0, index=trange, columns=["A", "B"])\n bool_idx = np.array([False, False, False, False, False, True])\n\n # assignment\n df.loc[trange[bool_idx], "A"] = 6\n\n expected = DataFrame(\n {"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange\n )\n tm.assert_frame_equal(df, expected)\n\n # in-place\n df = DataFrame(0, index=trange, columns=["A", "B"])\n df.loc[trange[bool_idx], "A"] += 6\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_with_unaligned_tz_aware_datetime_column(self):\n # GH 12981\n # Assignment of unaligned offset-aware datetime series.\n # Make sure timezone isn't lost\n column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")\n df = DataFrame({"dates": column})\n df["dates"] = column[[1, 0, 2]]\n tm.assert_series_equal(df["dates"], column)\n\n df = DataFrame({"dates": column})\n df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]\n tm.assert_series_equal(df["dates"], column)\n\n def test_loc_setitem_datetimelike_with_inference(self):\n # GH 7592\n # assignment of timedeltas with NaT\n\n one_hour = timedelta(hours=1)\n df = DataFrame(index=date_range("20130101", periods=4))\n df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")\n df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")\n df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")\n df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")\n 
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")\n df["F"] = np.timedelta64("NaT")\n df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")\n df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)\n df["H"] = np.datetime64("NaT")\n result = df.dtypes\n expected = Series(\n [np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,\n index=Index(list("ABCDEFGH"), dtype=object),\n )\n tm.assert_series_equal(result, expected)\n\n def test_getitem_boolean_indexing_mixed(self):\n df = DataFrame(\n {\n 0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n 1: {\n 35: np.nan,\n 40: 0.32632316859446198,\n 43: np.nan,\n 49: 0.32632316859446198,\n 50: 0.39114724480578139,\n },\n 2: {\n 35: np.nan,\n 40: np.nan,\n 43: 0.29012581014105987,\n 49: np.nan,\n 50: np.nan,\n },\n 3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n 4: {\n 35: 0.34215328467153283,\n 40: np.nan,\n 43: np.nan,\n 49: np.nan,\n 50: np.nan,\n },\n "y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},\n }\n )\n\n # mixed int/float ok\n df2 = df.copy()\n df2[df2 > 0.3] = 1\n expected = df.copy()\n expected.loc[40, 1] = 1\n expected.loc[49, 1] = 1\n expected.loc[50, 1] = 1\n expected.loc[35, 4] = 1\n tm.assert_frame_equal(df2, expected)\n\n df["foo"] = "test"\n msg = "not supported between instances|unorderable types|Invalid comparison"\n\n with pytest.raises(TypeError, match=msg):\n df[df > 0.3] = 1\n\n def test_type_error_multiindex(self):\n # See gh-12218\n mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])\n dg = DataFrame(\n [[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")\n )\n with pytest.raises(InvalidIndexError, match="slice"):\n dg[:, 0]\n\n index = Index(range(2), name="i")\n columns = MultiIndex(\n levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]\n )\n expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)\n\n result = dg.loc[:, (slice(None), 
0)]\n tm.assert_frame_equal(result, expected)\n\n name = ("x", 0)\n index = Index(range(2), name="i")\n expected = Series([1, 3], index=index, name=name)\n\n result = dg["x", 0]\n tm.assert_series_equal(result, expected)\n\n def test_getitem_interval_index_partial_indexing(self):\n # GH#36490\n df = DataFrame(\n np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))\n )\n\n expected = df.iloc[:, 0]\n\n res = df[0.5]\n tm.assert_series_equal(res, expected)\n\n res = df.loc[:, 0.5]\n tm.assert_series_equal(res, expected)\n\n def test_setitem_array_as_cell_value(self):\n # GH#43422\n df = DataFrame(columns=["a", "b"], dtype=object)\n df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}\n expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})\n tm.assert_frame_equal(df, expected)\n\n def test_iloc_setitem_nullable_2d_values(self):\n df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")\n orig = df.copy()\n\n df.loc[:] = df.values[:, ::-1]\n tm.assert_frame_equal(df, orig)\n\n df.loc[:] = pd.core.arrays.NumpyExtensionArray(df.values[:, ::-1])\n tm.assert_frame_equal(df, orig)\n\n df.iloc[:] = df.iloc[:, :].copy()\n tm.assert_frame_equal(df, orig)\n\n def test_getitem_segfault_with_empty_like_object(self):\n # GH#46848\n df = DataFrame(np.empty((1, 1), dtype=object))\n df[0] = np.empty_like(df[0])\n # this produces the segfault\n df[[0]]\n\n @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")\n @pytest.mark.parametrize(\n "null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]\n )\n def test_setting_mismatched_na_into_nullable_fails(\n self, null, any_numeric_ea_dtype\n ):\n # GH#44514 don't cast mismatched nulls to pd.NA\n df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)\n ser = df["A"].copy()\n arr = ser._values\n\n msg = "|".join(\n [\n r"timedelta64\[ns\] cannot be converted to (Floating|Integer)Dtype",\n r"datetime64\[ns\] cannot be converted to (Floating|Integer)Dtype",\n "'values' 
contains non-numeric NA",\n r"Invalid value '.*' for dtype '(U?Int|Float)\d{1,2}'",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n arr[0] = null\n\n with pytest.raises(TypeError, match=msg):\n arr[:2] = [null, null]\n\n with pytest.raises(TypeError, match=msg):\n ser[0] = null\n\n with pytest.raises(TypeError, match=msg):\n ser[:2] = [null, null]\n\n with pytest.raises(TypeError, match=msg):\n ser.iloc[0] = null\n\n with pytest.raises(TypeError, match=msg):\n ser.iloc[:2] = [null, null]\n\n with pytest.raises(TypeError, match=msg):\n df.iloc[0, 0] = null\n\n with pytest.raises(TypeError, match=msg):\n df.iloc[:2, 0] = [null, null]\n\n # Multi-Block\n df2 = df.copy()\n df2["B"] = ser.copy()\n with pytest.raises(TypeError, match=msg):\n df2.iloc[0, 0] = null\n\n with pytest.raises(TypeError, match=msg):\n df2.iloc[:2, 0] = [null, null]\n\n def test_loc_expand_empty_frame_keep_index_name(self):\n # GH#45621\n df = DataFrame(columns=["b"], index=Index([], name="a"))\n df.loc[0] = 1\n expected = DataFrame({"b": [1]}, index=Index([0], name="a"))\n tm.assert_frame_equal(df, expected)\n\n def test_loc_expand_empty_frame_keep_midx_names(self):\n # GH#46317\n df = DataFrame(\n columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"])\n )\n df.loc[(1, 2, 3)] = "foo"\n expected = DataFrame(\n {"d": ["foo"]},\n index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]),\n )\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\n "val, idxr",\n [\n ("x", "a"),\n ("x", ["a"]),\n (1, "a"),\n (1, ["a"]),\n ],\n )\n def test_loc_setitem_rhs_frame(self, idxr, val):\n # GH#47578\n df = DataFrame({"a": [1, 2]})\n\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n df.loc[:, idxr] = DataFrame({"a": [val, 11]}, index=[1, 2])\n expected = DataFrame({"a": [np.nan, val]})\n tm.assert_frame_equal(df, expected)\n\n @td.skip_array_manager_invalid_test\n def test_iloc_setitem_enlarge_no_warning(self, 
warn_copy_on_write):\n # GH#47381\n df = DataFrame(columns=["a", "b"])\n expected = df.copy()\n view = df[:]\n df.iloc[:, 0] = np.array([1, 2], dtype=np.float64)\n tm.assert_frame_equal(view, expected)\n\n def test_loc_internals_not_updated_correctly(self):\n # GH#47867 all steps are necessary to reproduce the initial bug\n df = DataFrame(\n {"bool_col": True, "a": 1, "b": 2.5},\n index=MultiIndex.from_arrays([[1, 2], [1, 2]], names=["idx1", "idx2"]),\n )\n idx = [(1, 1)]\n\n df["c"] = 3\n df.loc[idx, "c"] = 0\n\n df.loc[idx, "c"]\n df.loc[idx, ["a", "b"]]\n\n df.loc[idx, "c"] = 15\n result = df.loc[idx, "c"]\n expected = df = Series(\n 15,\n index=MultiIndex.from_arrays([[1], [1]], names=["idx1", "idx2"]),\n name="c",\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("val", [None, [None], pd.NA, [pd.NA]])\n def test_iloc_setitem_string_list_na(self, val):\n # GH#45469\n df = DataFrame({"a": ["a", "b", "c"]}, dtype="string")\n df.iloc[[0], :] = val\n expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string")\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize("val", [None, pd.NA])\n def test_iloc_setitem_string_na(self, val):\n # GH#45469\n df = DataFrame({"a": ["a", "b", "c"]}, dtype="string")\n df.iloc[0, :] = val\n expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string")\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize("func", [list, Series, np.array])\n def test_iloc_setitem_ea_null_slice_length_one_list(self, func):\n # GH#48016\n df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n df.iloc[:, func([0])] = 5\n expected = DataFrame({"a": [5, 5, 5]}, dtype="Int64")\n tm.assert_frame_equal(df, expected)\n\n def test_loc_named_tuple_for_midx(self):\n # GH#48124\n df = DataFrame(\n index=MultiIndex.from_product(\n [["A", "B"], ["a", "b", "c"]], names=["first", "second"]\n )\n )\n indexer_tuple = namedtuple("Indexer", df.index.names)\n idxr = indexer_tuple(first="A", second=["a", "b"])\n result = 
df.loc[idxr, :]\n expected = DataFrame(\n index=MultiIndex.from_tuples(\n [("A", "a"), ("A", "b")], names=["first", "second"]\n )\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("indexer", [["a"], "a"])\n @pytest.mark.parametrize("col", [{}, {"b": 1}])\n def test_set_2d_casting_date_to_int(self, col, indexer):\n # GH#49159\n df = DataFrame(\n {"a": [Timestamp("2022-12-29"), Timestamp("2022-12-30")], **col},\n )\n df.loc[[1], indexer] = df["a"] + pd.Timedelta(days=1)\n expected = DataFrame(\n {"a": [Timestamp("2022-12-29"), Timestamp("2022-12-31")], **col},\n )\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize("col", [{}, {"name": "a"}])\n def test_loc_setitem_reordering_with_all_true_indexer(self, col):\n # GH#48701\n n = 17\n df = DataFrame({**col, "x": range(n), "y": range(n)})\n expected = df.copy()\n df.loc[n * [True], ["x", "y"]] = df[["x", "y"]]\n tm.assert_frame_equal(df, expected)\n\n def test_loc_rhs_empty_warning(self):\n # GH48480\n df = DataFrame(columns=["a", "b"])\n expected = df.copy()\n rhs = DataFrame(columns=["a"])\n with tm.assert_produces_warning(None):\n df.loc[:, "a"] = rhs\n tm.assert_frame_equal(df, expected)\n\n def test_iloc_ea_series_indexer(self):\n # GH#49521\n df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])\n indexer = Series([0, 1], dtype="Int64")\n row_indexer = Series([1], dtype="Int64")\n result = df.iloc[row_indexer, indexer]\n expected = DataFrame([[5, 6]], index=[1])\n tm.assert_frame_equal(result, expected)\n\n result = df.iloc[row_indexer.values, indexer.values]\n tm.assert_frame_equal(result, expected)\n\n def test_iloc_ea_series_indexer_with_na(self):\n # GH#49521\n df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])\n indexer = Series([0, pd.NA], dtype="Int64")\n msg = "cannot convert"\n with pytest.raises(ValueError, match=msg):\n df.iloc[:, indexer]\n with pytest.raises(ValueError, match=msg):\n df.iloc[:, indexer.values]\n\n @pytest.mark.parametrize("indexer", [True, 
(True,)])\n @pytest.mark.parametrize("dtype", [bool, "boolean"])\n def test_loc_bool_multiindex(self, dtype, indexer):\n # GH#47687\n midx = MultiIndex.from_arrays(\n [\n Series([True, True, False, False], dtype=dtype),\n Series([True, False, True, False], dtype=dtype),\n ],\n names=["a", "b"],\n )\n df = DataFrame({"c": [1, 2, 3, 4]}, index=midx)\n with tm.maybe_produces_warning(PerformanceWarning, isinstance(indexer, tuple)):\n result = df.loc[indexer]\n expected = DataFrame(\n {"c": [1, 2]}, index=Index([True, False], name="b", dtype=dtype)\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("utc", [False, True])\n @pytest.mark.parametrize("indexer", ["date", ["date"]])\n def test_loc_datetime_assignment_dtype_does_not_change(self, utc, indexer):\n # GH#49837\n df = DataFrame(\n {\n "date": to_datetime(\n [datetime(2022, 1, 20), datetime(2022, 1, 22)], utc=utc\n ),\n "update": [True, False],\n }\n )\n expected = df.copy(deep=True)\n\n update_df = df[df["update"]]\n\n df.loc[df["update"], indexer] = update_df["date"]\n\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize("indexer, idx", [(tm.loc, 1), (tm.iloc, 2)])\n def test_setitem_value_coercing_dtypes(self, indexer, idx):\n # GH#50467\n df = DataFrame([["1", np.nan], ["2", np.nan], ["3", np.nan]], dtype=object)\n rhs = DataFrame([[1, np.nan], [2, np.nan]])\n indexer(df)[:idx, :] = rhs\n expected = DataFrame([[1, np.nan], [2, np.nan], ["3", np.nan]], dtype=object)\n tm.assert_frame_equal(df, expected)\n\n\nclass TestDataFrameIndexingUInt64:\n def test_setitem(self):\n df = DataFrame(\n {"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]},\n dtype=np.uint64,\n )\n idx = df["A"].rename("foo")\n\n # setitem\n assert "C" not in df.columns\n df["C"] = idx\n tm.assert_series_equal(df["C"], Series(idx, name="C"))\n\n assert "D" not in df.columns\n df["D"] = "foo"\n df["D"] = idx\n tm.assert_series_equal(df["D"], Series(idx, name="D"))\n del df["D"]\n\n # With NaN: because 
uint64 has no NaN element,\n # the column should be cast to object.\n df2 = df.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n df2.iloc[1, 1] = pd.NaT\n df2.iloc[1, 2] = pd.NaT\n result = df2["B"]\n tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))\n tm.assert_series_equal(\n df2.dtypes,\n Series(\n [np.dtype("uint64"), np.dtype("O"), np.dtype("O")],\n index=["A", "B", "C"],\n ),\n )\n\n\ndef test_object_casting_indexing_wraps_datetimelike(using_array_manager):\n # GH#31649, check the indexing methods all the way down the stack\n df = DataFrame(\n {\n "A": [1, 2],\n "B": date_range("2000", periods=2),\n "C": pd.timedelta_range("1 Day", periods=2),\n }\n )\n\n ser = df.loc[0]\n assert isinstance(ser.values[1], Timestamp)\n assert isinstance(ser.values[2], pd.Timedelta)\n\n ser = df.iloc[0]\n assert isinstance(ser.values[1], Timestamp)\n assert isinstance(ser.values[2], pd.Timedelta)\n\n ser = df.xs(0, axis=0)\n assert isinstance(ser.values[1], Timestamp)\n assert isinstance(ser.values[2], pd.Timedelta)\n\n if using_array_manager:\n # remainder of the test checking BlockManager internals\n return\n\n mgr = df._mgr\n mgr._rebuild_blknos_and_blklocs()\n arr = mgr.fast_xs(0).array\n assert isinstance(arr[1], Timestamp)\n assert isinstance(arr[2], pd.Timedelta)\n\n blk = mgr.blocks[mgr.blknos[1]]\n assert blk.dtype == "M8[ns]" # we got the right block\n val = blk.iget((0, 0))\n assert isinstance(val, Timestamp)\n\n blk = mgr.blocks[mgr.blknos[2]]\n assert blk.dtype == "m8[ns]" # we got the right block\n val = blk.iget((0, 0))\n assert isinstance(val, pd.Timedelta)\n\n\nmsg1 = r"Cannot setitem on a Categorical with a new category( \(.*\))?, set the"\nmsg2 = "Cannot set a Categorical with another, without identical categories"\n\n\nclass TestLocILocDataFrameCategorical:\n @pytest.fixture\n def orig(self):\n cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])\n idx = Index(["h", 
"i", "j", "k", "l", "m", "n"])\n values = [1, 1, 1, 1, 1, 1, 1]\n orig = DataFrame({"cats": cats, "values": values}, index=idx)\n return orig\n\n @pytest.fixture\n def exp_single_row(self):\n # The expected values if we change a single row\n cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])\n idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])\n values1 = [1, 1, 2, 1, 1, 1, 1]\n exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)\n return exp_single_row\n\n @pytest.fixture\n def exp_multi_row(self):\n # assign multiple rows (mixed values) (-> array) -> exp_multi_row\n # changed multiple rows\n cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])\n idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])\n values2 = [1, 1, 2, 2, 1, 1, 1]\n exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)\n return exp_multi_row\n\n @pytest.fixture\n def exp_parts_cats_col(self):\n # changed part of the cats column\n cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])\n idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])\n values3 = [1, 1, 1, 1, 1, 1, 1]\n exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)\n return exp_parts_cats_col\n\n @pytest.fixture\n def exp_single_cats_value(self):\n # changed single value in cats col\n cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])\n idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])\n values4 = [1, 1, 1, 1, 1, 1, 1]\n exp_single_cats_value = DataFrame(\n {"cats": cats4, "values": values4}, index=idx4\n )\n return exp_single_cats_value\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])\n def test_loc_iloc_setitem_list_of_lists(self, orig, exp_multi_row, indexer):\n # - assign multiple rows (mixed values) -> exp_multi_row\n df = orig.copy()\n\n key = slice(2, 4)\n if indexer is tm.loc:\n key = slice("j", "k")\n\n indexer(df)[key, :] = [["b", 2], ["b", 2]]\n 
tm.assert_frame_equal(df, exp_multi_row)\n\n df = orig.copy()\n with pytest.raises(TypeError, match=msg1):\n indexer(df)[key, :] = [["c", 2], ["c", 2]]\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc, tm.at, tm.iat])\n def test_loc_iloc_at_iat_setitem_single_value_in_categories(\n self, orig, exp_single_cats_value, indexer\n ):\n # - assign a single value -> exp_single_cats_value\n df = orig.copy()\n\n key = (2, 0)\n if indexer in [tm.loc, tm.at]:\n key = (df.index[2], df.columns[0])\n\n # "b" is among the categories for df["cat"}]\n indexer(df)[key] = "b"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n # "c" is not among the categories for df["cat"]\n with pytest.raises(TypeError, match=msg1):\n indexer(df)[key] = "c"\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])\n def test_loc_iloc_setitem_mask_single_value_in_categories(\n self, orig, exp_single_cats_value, indexer\n ):\n # mask with single True\n df = orig.copy()\n\n mask = df.index == "j"\n key = 0\n if indexer is tm.loc:\n key = df.columns[key]\n\n indexer(df)[mask, key] = "b"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])\n def test_loc_iloc_setitem_full_row_non_categorical_rhs(\n self, orig, exp_single_row, indexer\n ):\n # - assign a complete row (mixed values) -> exp_single_row\n df = orig.copy()\n\n key = 2\n if indexer is tm.loc:\n key = df.index[2]\n\n # not categorical dtype, but "b" _is_ among the categories for df["cat"]\n indexer(df)[key, :] = ["b", 2]\n tm.assert_frame_equal(df, exp_single_row)\n\n # "c" is not among the categories for df["cat"]\n with pytest.raises(TypeError, match=msg1):\n indexer(df)[key, :] = ["c", 2]\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])\n def test_loc_iloc_setitem_partial_col_categorical_rhs(\n self, orig, exp_parts_cats_col, indexer\n ):\n # assign a part of a column with dtype == categorical ->\n # exp_parts_cats_col\n df = orig.copy()\n\n key = (slice(2, 4), 
0)\n if indexer is tm.loc:\n key = (slice("j", "k"), df.columns[0])\n\n # same categories as we currently have in df["cats"]\n compat = Categorical(["b", "b"], categories=["a", "b"])\n indexer(df)[key] = compat\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n # categories do not match df["cat"]'s, but "b" is among them\n semi_compat = Categorical(list("bb"), categories=list("abc"))\n with pytest.raises(TypeError, match=msg2):\n # different categories but holdable values\n # -> not sure if this should fail or pass\n indexer(df)[key] = semi_compat\n\n # categories do not match df["cat"]'s, and "c" is not among them\n incompat = Categorical(list("cc"), categories=list("abc"))\n with pytest.raises(TypeError, match=msg2):\n # different values\n indexer(df)[key] = incompat\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])\n def test_loc_iloc_setitem_non_categorical_rhs(\n self, orig, exp_parts_cats_col, indexer\n ):\n # assign a part of a column with dtype != categorical -> exp_parts_cats_col\n df = orig.copy()\n\n key = (slice(2, 4), 0)\n if indexer is tm.loc:\n key = (slice("j", "k"), df.columns[0])\n\n # "b" is among the categories for df["cat"]\n indexer(df)[key] = ["b", "b"]\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n # "c" not part of the categories\n with pytest.raises(TypeError, match=msg1):\n indexer(df)[key] = ["c", "c"]\n\n @pytest.mark.parametrize("indexer", [tm.getitem, tm.loc, tm.iloc])\n def test_getitem_preserve_object_index_with_dates(self, indexer):\n # https://github.com/pandas-dev/pandas/pull/42950 - when selecting a column\n # from dataframe, don't try to infer object dtype index on Series construction\n idx = date_range("2012", periods=3).astype(object)\n df = DataFrame({0: [1, 2, 3]}, index=idx)\n assert df.index.dtype == object\n\n if indexer is tm.getitem:\n ser = indexer(df)[0]\n else:\n ser = indexer(df)[:, 0]\n\n assert ser.index.dtype == object\n\n def test_loc_on_multiindex_one_level(self):\n # GH#45779\n df = 
DataFrame(\n data=[[0], [1]],\n index=MultiIndex.from_tuples([("a",), ("b",)], names=["first"]),\n )\n expected = DataFrame(\n data=[[0]], index=MultiIndex.from_tuples([("a",)], names=["first"])\n )\n result = df.loc["a"]\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDeprecatedIndexers:\n @pytest.mark.parametrize(\n "key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]\n )\n def test_getitem_dict_and_set_deprecated(self, key):\n # GH#42825 enforced in 2.0\n df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n df.loc[key]\n\n @pytest.mark.parametrize(\n "key",\n [\n {1},\n {1: 1},\n (({1}, 2), "a"),\n (({1: 1}, 2), "a"),\n ((1, 2), {"a"}),\n ((1, 2), {"a": "a"}),\n ],\n )\n def test_getitem_dict_and_set_deprecated_multiindex(self, key):\n # GH#42825 enforced in 2.0\n df = DataFrame(\n [[1, 2], [3, 4]],\n columns=["a", "b"],\n index=MultiIndex.from_tuples([(1, 2), (3, 4)]),\n )\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n df.loc[key]\n\n @pytest.mark.parametrize(\n "key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]\n )\n def test_setitem_dict_and_set_disallowed(self, key):\n # GH#42825 enforced in 2.0\n df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n df.loc[key] = 1\n\n @pytest.mark.parametrize(\n "key",\n [\n {1},\n {1: 1},\n (({1}, 2), "a"),\n (({1: 1}, 2), "a"),\n ((1, 2), {"a"}),\n ((1, 2), {"a": "a"}),\n ],\n )\n def test_setitem_dict_and_set_disallowed_multiindex(self, key):\n # GH#42825 enforced in 2.0\n df = DataFrame(\n [[1, 2], [3, 4]],\n columns=["a", "b"],\n index=MultiIndex.from_tuples([(1, 2), (3, 4)]),\n )\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n df.loc[key] = 1\n\n\ndef test_adding_new_conditional_column() -> None:\n # https://github.com/pandas-dev/pandas/issues/55025\n df = 
DataFrame({"x": [1]})\n df.loc[df["x"] == 1, "y"] = "1"\n expected = DataFrame({"x": [1], "y": ["1"]})\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame({"x": [1]})\n # try inserting something which numpy would store as 'object'\n value = lambda x: x\n df.loc[df["x"] == 1, "y"] = value\n expected = DataFrame({"x": [1], "y": [value]})\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize(\n ("dtype", "infer_string"),\n [\n (object, False),\n (pd.StringDtype(na_value=np.nan), True),\n ],\n)\ndef test_adding_new_conditional_column_with_string(dtype, infer_string) -> None:\n # https://github.com/pandas-dev/pandas/issues/56204\n df = DataFrame({"a": [1, 2], "b": [3, 4]})\n with pd.option_context("future.infer_string", infer_string):\n df.loc[df["a"] == 1, "c"] = "1"\n expected = DataFrame({"a": [1, 2], "b": [3, 4], "c": ["1", float("nan")]}).astype(\n {"a": "int64", "b": "int64", "c": dtype}\n )\n tm.assert_frame_equal(df, expected)\n\n\ndef test_add_new_column_infer_string():\n # GH#55366\n df = DataFrame({"x": [1]})\n with pd.option_context("future.infer_string", True):\n df.loc[df["x"] == 1, "y"] = "1"\n expected = DataFrame(\n {"x": [1], "y": Series(["1"], dtype=pd.StringDtype(na_value=np.nan))},\n columns=Index(["x", "y"], dtype="str"),\n )\n tm.assert_frame_equal(df, expected)\n\n\nclass TestSetitemValidation:\n # This is adapted from pandas/tests/arrays/masked/test_indexing.py\n # but checks for warnings instead of errors.\n def _check_setitem_invalid(self, df, invalid, indexer, warn):\n msg = "Setting an item of incompatible dtype is deprecated"\n msg = re.escape(msg)\n\n orig_df = df.copy()\n\n # iloc\n with tm.assert_produces_warning(warn, match=msg):\n df.iloc[indexer, 0] = invalid\n df = orig_df.copy()\n\n # loc\n with tm.assert_produces_warning(warn, match=msg):\n df.loc[indexer, "a"] = invalid\n df = orig_df.copy()\n\n _invalid_scalars = [\n 1 + 2j,\n "True",\n "1",\n "1.0",\n pd.NaT,\n np.datetime64("NaT"),\n np.timedelta64("NaT"),\n 
]\n _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]\n\n @pytest.mark.parametrize(\n "invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]\n )\n @pytest.mark.parametrize("indexer", _indexers)\n def test_setitem_validation_scalar_bool(self, invalid, indexer):\n df = DataFrame({"a": [True, False, False]}, dtype="bool")\n self._check_setitem_invalid(df, invalid, indexer, FutureWarning)\n\n @pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)])\n @pytest.mark.parametrize("indexer", _indexers)\n def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):\n df = DataFrame({"a": [1, 2, 3]}, dtype=any_int_numpy_dtype)\n if isna(invalid) and invalid is not pd.NaT and not np.isnat(invalid):\n warn = None\n else:\n warn = FutureWarning\n self._check_setitem_invalid(df, invalid, indexer, warn)\n\n @pytest.mark.parametrize("invalid", _invalid_scalars + [True])\n @pytest.mark.parametrize("indexer", _indexers)\n def test_setitem_validation_scalar_float(self, invalid, float_numpy_dtype, indexer):\n df = DataFrame({"a": [1, 2, None]}, dtype=float_numpy_dtype)\n self._check_setitem_invalid(df, invalid, indexer, FutureWarning)\n
.venv\Lib\site-packages\pandas\tests\frame\indexing\test_indexing.py
test_indexing.py
Python
70,442
0.75
0.077416
0.1
react-lib
22
2024-07-17T02:09:41.125932
MIT
true
de29f445a92b1966ce9bc3bbee4ddc42