content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
\n\n | .venv\Lib\site-packages\pandas\tests\frame\__pycache__\test_unary.cpython-313.pyc | test_unary.cpython-313.pyc | Other | 10,404 | 0.8 | 0.043103 | 0 | python-kit | 296 | 2024-12-15T02:02:39.637098 | Apache-2.0 | true | 38e0208c4374c6b13ce7da9b6250d91e |
\n\n | .venv\Lib\site-packages\pandas\tests\frame\__pycache__\test_validate.cpython-313.pyc | test_validate.cpython-313.pyc | Other | 1,937 | 0.7 | 0.037037 | 0 | node-utils | 855 | 2025-03-18T18:31:03.361623 | Apache-2.0 | true | 84bad174c30c5ec5638bf3029997a8ff |
\n\n | .venv\Lib\site-packages\pandas\tests\frame\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 193 | 0.7 | 0 | 0 | awesome-app | 177 | 2023-07-11T01:32:15.397902 | BSD-3-Clause | true | 6affc86cdbce684e68e22d9f61b5e8d9 |
"""Tests dealing with the NDFrame.allows_duplicates."""\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\nnot_implemented = pytest.mark.xfail(reason="Not implemented.")\n\n# ----------------------------------------------------------------------------\n# Preservation\n\n\nclass TestPreserves:\n @pytest.mark.parametrize(\n "cls, data",\n [\n (pd.Series, np.array([])),\n (pd.Series, [1, 2]),\n (pd.DataFrame, {}),\n (pd.DataFrame, {"A": [1, 2]}),\n ],\n )\n def test_construction_ok(self, cls, data):\n result = cls(data)\n assert result.flags.allows_duplicate_labels is True\n\n result = cls(data).set_flags(allows_duplicate_labels=False)\n assert result.flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\n "func",\n [\n operator.itemgetter(["a"]),\n operator.methodcaller("add", 1),\n operator.methodcaller("rename", str.upper),\n operator.methodcaller("rename", "name"),\n operator.methodcaller("abs"),\n np.abs,\n ],\n )\n def test_preserved_series(self, func):\n s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)\n assert func(s).flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\n "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])]\n )\n # TODO: frame\n @not_implemented\n def test_align(self, other):\n s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)\n a, b = s.align(other)\n assert a.flags.allows_duplicate_labels is False\n assert b.flags.allows_duplicate_labels is False\n\n def test_preserved_frame(self):\n df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags(\n allows_duplicate_labels=False\n )\n assert df.loc[["a"]].flags.allows_duplicate_labels is False\n assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False\n\n def test_to_frame(self):\n ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False)\n assert 
ser.to_frame().flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize("func", ["add", "sub"])\n @pytest.mark.parametrize("frame", [False, True])\n @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")])\n def test_binops(self, func, other, frame):\n df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags(\n allows_duplicate_labels=False\n )\n if frame:\n df = df.to_frame()\n if isinstance(other, pd.Series) and frame:\n other = other.to_frame()\n func = operator.methodcaller(func, other)\n assert df.flags.allows_duplicate_labels is False\n assert func(df).flags.allows_duplicate_labels is False\n\n def test_preserve_getitem(self):\n df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)\n assert df[["A"]].flags.allows_duplicate_labels is False\n assert df["A"].flags.allows_duplicate_labels is False\n assert df.loc[0].flags.allows_duplicate_labels is False\n assert df.loc[[0]].flags.allows_duplicate_labels is False\n assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False\n\n def test_ndframe_getitem_caching_issue(\n self, request, using_copy_on_write, warn_copy_on_write\n ):\n if not (using_copy_on_write or warn_copy_on_write):\n request.applymarker(pytest.mark.xfail(reason="Unclear behavior."))\n # NDFrame.__getitem__ will cache the first df['A']. May need to\n # invalidate that cache? 
Update the cached entries?\n df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False)\n assert df["A"].flags.allows_duplicate_labels is False\n df.flags.allows_duplicate_labels = True\n assert df["A"].flags.allows_duplicate_labels is True\n\n @pytest.mark.parametrize(\n "objs, kwargs",\n [\n # Series\n (\n [\n pd.Series(1, index=["a", "b"]),\n pd.Series(2, index=["c", "d"]),\n ],\n {},\n ),\n (\n [\n pd.Series(1, index=["a", "b"]),\n pd.Series(2, index=["a", "b"]),\n ],\n {"ignore_index": True},\n ),\n (\n [\n pd.Series(1, index=["a", "b"]),\n pd.Series(2, index=["a", "b"]),\n ],\n {"axis": 1},\n ),\n # Frame\n (\n [\n pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),\n pd.DataFrame({"A": [1, 2]}, index=["c", "d"]),\n ],\n {},\n ),\n (\n [\n pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),\n pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),\n ],\n {"ignore_index": True},\n ),\n (\n [\n pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),\n pd.DataFrame({"B": [1, 2]}, index=["a", "b"]),\n ],\n {"axis": 1},\n ),\n # Series / Frame\n (\n [\n pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),\n pd.Series([1, 2], index=["a", "b"], name="B"),\n ],\n {"axis": 1},\n ),\n ],\n )\n def test_concat(self, objs, kwargs):\n objs = [x.set_flags(allows_duplicate_labels=False) for x in objs]\n result = pd.concat(objs, **kwargs)\n assert result.flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\n "left, right, expected",\n [\n # false false false\n pytest.param(\n pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags(\n allows_duplicate_labels=False\n ),\n False,\n marks=not_implemented,\n ),\n # false true false\n pytest.param(\n pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),\n False,\n marks=not_implemented,\n ),\n # true true true\n (\n pd.DataFrame({"A": [0, 1]}, index=["a", 
"b"]),\n pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),\n True,\n ),\n ],\n )\n def test_merge(self, left, right, expected):\n result = pd.merge(left, right, left_index=True, right_index=True)\n assert result.flags.allows_duplicate_labels is expected\n\n @not_implemented\n def test_groupby(self):\n # XXX: This is under tested\n # TODO:\n # - apply\n # - transform\n # - Should passing a grouper that disallows duplicates propagate?\n df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False)\n result = df.groupby([0, 0, 1]).agg("count")\n assert result.flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize("frame", [True, False])\n @not_implemented\n def test_window(self, frame):\n df = pd.Series(\n 1,\n index=pd.date_range("2000", periods=12),\n name="A",\n allows_duplicate_labels=False,\n )\n if frame:\n df = df.to_frame()\n assert df.rolling(3).mean().flags.allows_duplicate_labels is False\n assert df.ewm(3).mean().flags.allows_duplicate_labels is False\n assert df.expanding(3).mean().flags.allows_duplicate_labels is False\n\n\n# ----------------------------------------------------------------------------\n# Raises\n\n\nclass TestRaises:\n @pytest.mark.parametrize(\n "cls, axes",\n [\n (pd.Series, {"index": ["a", "a"], "dtype": float}),\n (pd.DataFrame, {"index": ["a", "a"]}),\n (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}),\n (pd.DataFrame, {"columns": ["b", "b"]}),\n ],\n )\n def test_set_flags_with_duplicates(self, cls, axes):\n result = cls(**axes)\n assert result.flags.allows_duplicate_labels is True\n\n msg = "Index has duplicates."\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n cls(**axes).set_flags(allows_duplicate_labels=False)\n\n @pytest.mark.parametrize(\n "data",\n [\n pd.Series(index=[0, 0], dtype=float),\n pd.DataFrame(index=[0, 0]),\n pd.DataFrame(columns=[0, 0]),\n ],\n )\n def test_setting_allows_duplicate_labels_raises(self, data):\n msg = "Index has duplicates."\n with 
pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n data.flags.allows_duplicate_labels = False\n\n assert data.flags.allows_duplicate_labels is True\n\n def test_series_raises(self):\n a = pd.Series(0, index=["a", "b"])\n b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)\n msg = "Index has duplicates."\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.concat([a, b])\n\n @pytest.mark.parametrize(\n "getter, target",\n [\n (operator.itemgetter(["A", "A"]), None),\n # loc\n (operator.itemgetter(["a", "a"]), "loc"),\n pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"),\n (operator.itemgetter((["a", "a"], "A")), "loc"),\n # iloc\n (operator.itemgetter([0, 0]), "iloc"),\n pytest.param(operator.itemgetter((0, [0, 0])), "iloc"),\n pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"),\n ],\n )\n def test_getitem_raises(self, getter, target):\n df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags(\n allows_duplicate_labels=False\n )\n if target:\n # df, df.loc, or df.iloc\n target = getattr(df, target)\n else:\n target = df\n\n msg = "Index has duplicates."\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n getter(target)\n\n @pytest.mark.parametrize(\n "objs, kwargs",\n [\n (\n [\n pd.Series(1, index=[0, 1], name="a"),\n pd.Series(2, index=[0, 1], name="a"),\n ],\n {"axis": 1},\n )\n ],\n )\n def test_concat_raises(self, objs, kwargs):\n objs = [x.set_flags(allows_duplicate_labels=False) for x in objs]\n msg = "Index has duplicates."\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.concat(objs, **kwargs)\n\n @not_implemented\n def test_merge_raises(self):\n a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags(\n allows_duplicate_labels=False\n )\n b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"])\n msg = "Index has duplicates."\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.merge(a, b, left_index=True, 
right_index=True)\n\n\n@pytest.mark.parametrize(\n "idx",\n [\n pd.Index([1, 1]),\n pd.Index(["a", "a"]),\n pd.Index([1.1, 1.1]),\n pd.PeriodIndex([pd.Period("2000", "D")] * 2),\n pd.DatetimeIndex([pd.Timestamp("2000")] * 2),\n pd.TimedeltaIndex([pd.Timedelta("1D")] * 2),\n pd.CategoricalIndex(["a", "a"]),\n pd.IntervalIndex([pd.Interval(0, 1)] * 2),\n pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef test_raises_basic(idx):\n msg = "Index has duplicates."\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False)\n\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False)\n\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False)\n\n\ndef test_format_duplicate_labels_message():\n idx = pd.Index(["a", "b", "a", "b", "c"])\n result = idx._format_duplicate_message()\n expected = pd.DataFrame(\n {"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label")\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_format_duplicate_labels_message_multi():\n idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]])\n result = idx._format_duplicate_message()\n expected = pd.DataFrame(\n {"positions": [[0, 2], [1, 3]]},\n index=pd.MultiIndex.from_product([["A"], ["a", "b"]]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dataframe_insert_raises():\n df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)\n msg = "Cannot specify"\n with pytest.raises(ValueError, match=msg):\n df.insert(0, "A", [3, 4], allow_duplicates=True)\n\n\n@pytest.mark.parametrize(\n "method, frame_only",\n [\n (operator.methodcaller("set_index", "A", inplace=True), True),\n (operator.methodcaller("reset_index", inplace=True), True),\n 
(operator.methodcaller("rename", lambda x: x, inplace=True), False),\n ],\n)\ndef test_inplace_raises(method, frame_only):\n df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags(\n allows_duplicate_labels=False\n )\n s = df["A"]\n s.flags.allows_duplicate_labels = False\n msg = "Cannot specify"\n\n with pytest.raises(ValueError, match=msg):\n method(df)\n if not frame_only:\n with pytest.raises(ValueError, match=msg):\n method(s)\n\n\ndef test_pickle():\n a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False)\n b = tm.round_trip_pickle(a)\n tm.assert_series_equal(a, b)\n\n a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False)\n b = tm.round_trip_pickle(a)\n tm.assert_frame_equal(a, b)\n | .venv\Lib\site-packages\pandas\tests\generic\test_duplicate_labels.py | test_duplicate_labels.py | Python | 14,506 | 0.95 | 0.082324 | 0.057221 | node-utils | 896 | 2025-03-13T01:15:41.293291 | MIT | true | be22057d219bc9ad8baa9998226aab95 |
"""\nAn exhaustive list of pandas methods exercising NDFrame.__finalize__.\n"""\nimport operator\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n# TODO:\n# * Binary methods (mul, div, etc.)\n# * Binary outputs (align, etc.)\n# * top-level methods (concat, merge, get_dummies, etc.)\n# * window\n# * cumulative reductions\n\nnot_implemented_mark = pytest.mark.xfail(reason="not implemented")\n\nmi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"])\n\nframe_data = ({"A": [1]},)\nframe_mi_data = ({"A": [1, 2, 3, 4]}, mi)\n\n\n# Tuple of\n# - Callable: Constructor (Series, DataFrame)\n# - Tuple: Constructor args\n# - Callable: pass the constructed value with attrs set to this.\n\n_all_methods = [\n (pd.Series, ([0],), operator.methodcaller("take", [])),\n (pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),\n (pd.Series, ([0],), operator.methodcaller("repeat", 2)),\n (pd.Series, ([0],), operator.methodcaller("reset_index")),\n (pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),\n (pd.Series, ([0],), operator.methodcaller("to_frame")),\n (pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),\n (pd.Series, ([0, 0],), operator.methodcaller("duplicated")),\n (pd.Series, ([0, 0],), operator.methodcaller("round")),\n (pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)),\n (pd.Series, ([0, 0],), operator.methodcaller("rename", "name")),\n (pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])),\n (pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])),\n (pd.Series, ([0, 0],), operator.methodcaller("drop", [0])),\n (pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)),\n (pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})),\n (pd.Series, ([0, 0],), operator.methodcaller("shift")),\n (pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])),\n (pd.Series, ([0, 0],), 
operator.methodcaller("between", 0, 2)),\n (pd.Series, ([0, 0],), operator.methodcaller("isna")),\n (pd.Series, ([0, 0],), operator.methodcaller("isnull")),\n (pd.Series, ([0, 0],), operator.methodcaller("notna")),\n (pd.Series, ([0, 0],), operator.methodcaller("notnull")),\n (pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))),\n # TODO: mul, div, etc.\n (\n pd.Series,\n ([0], pd.period_range("2000", periods=1)),\n operator.methodcaller("to_timestamp"),\n ),\n (\n pd.Series,\n ([0], pd.date_range("2000", periods=1)),\n operator.methodcaller("to_period"),\n ),\n pytest.param(\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("dot", pd.DataFrame(index=["A"])),\n ),\n marks=pytest.mark.xfail(reason="Implement binary finalize"),\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("transpose")),\n (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")),\n (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])),\n (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))),\n (pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])),\n (pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")),\n (pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")),\n (pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")),\n (pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)),\n (pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])),\n (pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])),\n (pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])),\n (pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])),\n (pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})),\n (pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)),\n (pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")),\n 
(pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")),\n (pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")),\n (pd.DataFrame, frame_data, operator.methodcaller("reset_index")),\n (pd.DataFrame, frame_data, operator.methodcaller("isna")),\n (pd.DataFrame, frame_data, operator.methodcaller("isnull")),\n (pd.DataFrame, frame_data, operator.methodcaller("notna")),\n (pd.DataFrame, frame_data, operator.methodcaller("notnull")),\n (pd.DataFrame, frame_data, operator.methodcaller("dropna")),\n (pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")),\n (pd.DataFrame, frame_data, operator.methodcaller("duplicated")),\n (pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")),\n (pd.DataFrame, frame_data, operator.methodcaller("sort_index")),\n (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")),\n (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")),\n (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")),\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("add", pd.DataFrame(*frame_data)),\n ),\n # TODO: div, mul, etc.\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add),\n ),\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("combine_first", pd.DataFrame(*frame_data)),\n ),\n pytest.param(\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("update", pd.DataFrame(*frame_data)),\n ),\n marks=not_implemented_mark,\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")),\n (\n pd.DataFrame,\n ({"A": [1], "B": [1]},),\n operator.methodcaller("pivot_table", columns="A"),\n ),\n (\n pd.DataFrame,\n ({"A": [1], "B": [1]},),\n operator.methodcaller("pivot_table", columns="A", aggfunc=["mean", "sum"]),\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("stack")),\n (pd.DataFrame, frame_data, operator.methodcaller("explode", "A")),\n (pd.DataFrame, frame_mi_data, 
operator.methodcaller("unstack")),\n (\n pd.DataFrame,\n ({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},),\n operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]),\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("map", lambda x: x)),\n pytest.param(\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("merge", pd.DataFrame({"A": [1]})),\n ),\n marks=not_implemented_mark,\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("round", 2)),\n (pd.DataFrame, frame_data, operator.methodcaller("corr")),\n pytest.param(\n (pd.DataFrame, frame_data, operator.methodcaller("cov")),\n marks=[\n pytest.mark.filterwarnings("ignore::RuntimeWarning"),\n ],\n ),\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("corrwith", pd.DataFrame(*frame_data)),\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("count")),\n (pd.DataFrame, frame_data, operator.methodcaller("nunique")),\n (pd.DataFrame, frame_data, operator.methodcaller("idxmin")),\n (pd.DataFrame, frame_data, operator.methodcaller("idxmax")),\n (pd.DataFrame, frame_data, operator.methodcaller("mode")),\n (pd.Series, [0], operator.methodcaller("mode")),\n (pd.DataFrame, frame_data, operator.methodcaller("median")),\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("quantile", numeric_only=True),\n ),\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("quantile", q=[0.25, 0.75], numeric_only=True),\n ),\n (\n pd.DataFrame,\n ({"A": [pd.Timedelta(days=1), pd.Timedelta(days=2)]},),\n operator.methodcaller("quantile", numeric_only=False),\n ),\n (\n pd.DataFrame,\n ({"A": [np.datetime64("2022-01-01"), np.datetime64("2022-01-02")]},),\n operator.methodcaller("quantile", numeric_only=True),\n ),\n (\n pd.DataFrame,\n ({"A": [1]}, [pd.Period("2000", "D")]),\n operator.methodcaller("to_timestamp"),\n ),\n (\n pd.DataFrame,\n ({"A": [1]}, [pd.Timestamp("2000")]),\n operator.methodcaller("to_period", freq="D"),\n ),\n (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", 
[1])),\n (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", pd.Series([1]))),\n (\n pd.DataFrame,\n frame_mi_data,\n operator.methodcaller("isin", pd.DataFrame({"A": [1]})),\n ),\n (pd.DataFrame, frame_mi_data, operator.methodcaller("droplevel", "A")),\n (pd.DataFrame, frame_data, operator.methodcaller("pop", "A")),\n # Squeeze on columns, otherwise we'll end up with a scalar\n (pd.DataFrame, frame_data, operator.methodcaller("squeeze", axis="columns")),\n (pd.Series, ([1, 2],), operator.methodcaller("squeeze")),\n (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")),\n (pd.DataFrame, frame_data, operator.methodcaller("rename_axis", columns="a")),\n # Unary ops\n (pd.DataFrame, frame_data, operator.neg),\n (pd.Series, [1], operator.neg),\n (pd.DataFrame, frame_data, operator.pos),\n (pd.Series, [1], operator.pos),\n (pd.DataFrame, frame_data, operator.inv),\n (pd.Series, [1], operator.inv),\n (pd.DataFrame, frame_data, abs),\n (pd.Series, [1], abs),\n (pd.DataFrame, frame_data, round),\n (pd.Series, [1], round),\n (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])),\n (pd.DataFrame, frame_mi_data, operator.methodcaller("xs", "a")),\n (pd.Series, (1, mi), operator.methodcaller("xs", "a")),\n (pd.DataFrame, frame_data, operator.methodcaller("get", "A")),\n (\n pd.DataFrame,\n frame_data,\n operator.methodcaller("reindex_like", pd.DataFrame({"A": [1, 2, 3]})),\n ),\n (\n pd.Series,\n frame_data,\n operator.methodcaller("reindex_like", pd.Series([0, 1, 2])),\n ),\n (pd.DataFrame, frame_data, operator.methodcaller("add_prefix", "_")),\n (pd.DataFrame, frame_data, operator.methodcaller("add_suffix", "_")),\n (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_prefix", "_")),\n (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_suffix", "_")),\n (pd.Series, ([3, 2],), operator.methodcaller("sort_values")),\n (pd.Series, ([1] * 10,), operator.methodcaller("head")),\n (pd.DataFrame, ({"A": [1] * 10},), 
operator.methodcaller("head")),\n (pd.Series, ([1] * 10,), operator.methodcaller("tail")),\n (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("tail")),\n (pd.Series, ([1, 2],), operator.methodcaller("sample", n=2, replace=True)),\n (pd.DataFrame, (frame_data,), operator.methodcaller("sample", n=2, replace=True)),\n (pd.Series, ([1, 2],), operator.methodcaller("astype", float)),\n (pd.DataFrame, frame_data, operator.methodcaller("astype", float)),\n (pd.Series, ([1, 2],), operator.methodcaller("copy")),\n (pd.DataFrame, frame_data, operator.methodcaller("copy")),\n (pd.Series, ([1, 2], None, object), operator.methodcaller("infer_objects")),\n (\n pd.DataFrame,\n ({"A": np.array([1, 2], dtype=object)},),\n operator.methodcaller("infer_objects"),\n ),\n (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")),\n (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),\n (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")),\n (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")),\n (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)),\n (pd.DataFrame, frame_data, operator.methodcaller("clip", lower=1)),\n (\n pd.Series,\n (1, pd.date_range("2000", periods=4)),\n operator.methodcaller("asfreq", "h"),\n ),\n (\n pd.DataFrame,\n ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),\n operator.methodcaller("asfreq", "h"),\n ),\n (\n pd.Series,\n (1, pd.date_range("2000", periods=4)),\n operator.methodcaller("at_time", "12:00"),\n ),\n (\n pd.DataFrame,\n ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),\n operator.methodcaller("at_time", "12:00"),\n ),\n (\n pd.Series,\n (1, pd.date_range("2000", periods=4)),\n operator.methodcaller("between_time", "12:00", "13:00"),\n ),\n (\n pd.DataFrame,\n ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),\n operator.methodcaller("between_time", "12:00", "13:00"),\n ),\n (\n pd.Series,\n (1, pd.date_range("2000", periods=4)),\n 
operator.methodcaller("last", "3D"),\n ),\n (\n pd.DataFrame,\n ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),\n operator.methodcaller("last", "3D"),\n ),\n (pd.Series, ([1, 2],), operator.methodcaller("rank")),\n (pd.DataFrame, frame_data, operator.methodcaller("rank")),\n (pd.Series, ([1, 2],), operator.methodcaller("where", np.array([True, False]))),\n (pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))),\n (pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))),\n (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))),\n (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)),\n (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)),\n (\n pd.Series,\n (1, pd.date_range("2000", periods=4, tz="UTC")),\n operator.methodcaller("tz_convert", "CET"),\n ),\n (\n pd.DataFrame,\n ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4, tz="UTC")),\n operator.methodcaller("tz_convert", "CET"),\n ),\n (\n pd.Series,\n (1, pd.date_range("2000", periods=4)),\n operator.methodcaller("tz_localize", "CET"),\n ),\n (\n pd.DataFrame,\n ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),\n operator.methodcaller("tz_localize", "CET"),\n ),\n (pd.Series, ([1, 2],), operator.methodcaller("describe")),\n (pd.DataFrame, frame_data, operator.methodcaller("describe")),\n (pd.Series, ([1, 2],), operator.methodcaller("pct_change")),\n (pd.DataFrame, frame_data, operator.methodcaller("pct_change")),\n (pd.Series, ([1],), operator.methodcaller("transform", lambda x: x - x.min())),\n (\n pd.DataFrame,\n frame_mi_data,\n operator.methodcaller("transform", lambda x: x - x.min()),\n ),\n (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)),\n (pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)),\n # Cumulative reductions\n (pd.Series, ([1],), operator.methodcaller("cumsum")),\n (pd.DataFrame, frame_data, operator.methodcaller("cumsum")),\n 
(pd.Series, ([1],), operator.methodcaller("cummin")),\n (pd.DataFrame, frame_data, operator.methodcaller("cummin")),\n (pd.Series, ([1],), operator.methodcaller("cummax")),\n (pd.DataFrame, frame_data, operator.methodcaller("cummax")),\n (pd.Series, ([1],), operator.methodcaller("cumprod")),\n (pd.DataFrame, frame_data, operator.methodcaller("cumprod")),\n # Reductions\n (pd.DataFrame, frame_data, operator.methodcaller("any")),\n (pd.DataFrame, frame_data, operator.methodcaller("all")),\n (pd.DataFrame, frame_data, operator.methodcaller("min")),\n (pd.DataFrame, frame_data, operator.methodcaller("max")),\n (pd.DataFrame, frame_data, operator.methodcaller("sum")),\n (pd.DataFrame, frame_data, operator.methodcaller("std")),\n (pd.DataFrame, frame_data, operator.methodcaller("mean")),\n (pd.DataFrame, frame_data, operator.methodcaller("prod")),\n (pd.DataFrame, frame_data, operator.methodcaller("sem")),\n (pd.DataFrame, frame_data, operator.methodcaller("skew")),\n (pd.DataFrame, frame_data, operator.methodcaller("kurt")),\n]\n\n\ndef idfn(x):\n xpr = re.compile(r"'(.*)?'")\n m = xpr.search(str(x))\n if m:\n return m.group(1)\n else:\n return str(x)\n\n\n@pytest.fixture(params=_all_methods, ids=lambda x: idfn(x[-1]))\ndef ndframe_method(request):\n """\n An NDFrame method returning an NDFrame.\n """\n return request.param\n\n\n@pytest.mark.filterwarnings(\n "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning",\n "ignore:last is deprecated:FutureWarning",\n)\ndef test_finalize_called(ndframe_method):\n cls, init_args, method = ndframe_method\n ndframe = cls(*init_args)\n\n ndframe.attrs = {"a": 1}\n result = method(ndframe)\n\n assert result.attrs == {"a": 1}\n\n\n@pytest.mark.parametrize(\n "data",\n [\n pd.Series(1, pd.date_range("2000", periods=4)),\n pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),\n ],\n)\ndef test_finalize_first(data):\n deprecated_msg = "first is deprecated"\n\n data.attrs = {"a": 1}\n with 
tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
        result = data.first("3D")
    assert result.attrs == {"a": 1}


# ``.attrs`` must survive the deprecated ``last`` selection (GH 53710).
@pytest.mark.parametrize(
    "data",
    [
        pd.Series(1, pd.date_range("2000", periods=4)),
        pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
    ],
)
def test_finalize_last(data):
    # GH 53710
    deprecated_msg = "last is deprecated"

    data.attrs = {"a": 1}
    with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
        result = data.last("3D")
    assert result.attrs == {"a": 1}


@not_implemented_mark
def test_finalize_called_eval_numexpr():
    # eval with the numexpr engine currently drops attrs, hence the xfail mark.
    pytest.importorskip("numexpr")
    df = pd.DataFrame({"A": [1, 2]})
    df.attrs["A"] = 1
    result = df.eval("A + 1", engine="numexpr")
    assert result.attrs == {"A": 1}


# ----------------------------------------------------------------------------
# Binary operations


@pytest.mark.parametrize("annotate", ["left", "right", "both"])
@pytest.mark.parametrize(
    "args",
    [
        (1, pd.Series([1])),
        (1, pd.DataFrame({"A": [1]})),
        (pd.Series([1]), 1),
        (pd.DataFrame({"A": [1]}), 1),
        (pd.Series([1]), pd.Series([1])),
        (pd.DataFrame({"A": [1]}), pd.DataFrame({"A": [1]})),
        (pd.Series([1]), pd.DataFrame({"A": [1]})),
        (pd.DataFrame({"A": [1]}), pd.Series([1])),
    ],
    ids=lambda x: f"({type(x[0]).__name__},{type(x[1]).__name__})",
)
def test_binops(request, args, annotate, all_binary_operators):
    # Check that ``attrs`` propagate through every binary operator for each
    # scalar/Series/DataFrame operand combination, with attrs set on the
    # left, the right, or both operands.
    # This generates 624 tests... Is that needed?
    left, right = args
    if isinstance(left, (pd.DataFrame, pd.Series)):
        left.attrs = {}
    if isinstance(right, (pd.DataFrame, pd.Series)):
        right.attrs = {}

    if annotate == "left" and isinstance(left, int):
        pytest.skip("left is an int and doesn't support .attrs")
    if annotate == "right" and isinstance(right, int):
        pytest.skip("right is an int and doesn't support .attrs")

    # The xfail matrix below marks the known-broken combinations: propagation
    # only from the operand that is not driving the op (depends on whether the
    # operator is a reflected "r*" variant) and mixed Series/DataFrame cases.
    if not (isinstance(left, int) or isinstance(right, int)) and annotate != "both":
        if not all_binary_operators.__name__.startswith("r"):
            if annotate == "right" and isinstance(left, type(right)):
                request.applymarker(
                    pytest.mark.xfail(
                        reason=f"{all_binary_operators} doesn't work when right has "
                        f"attrs and both are {type(left)}"
                    )
                )
            if not isinstance(left, type(right)):
                if annotate == "left" and isinstance(left, pd.Series):
                    request.applymarker(
                        pytest.mark.xfail(
                            reason=f"{all_binary_operators} doesn't work when the "
                            "objects are different Series has attrs"
                        )
                    )
                elif annotate == "right" and isinstance(right, pd.Series):
                    request.applymarker(
                        pytest.mark.xfail(
                            reason=f"{all_binary_operators} doesn't work when the "
                            "objects are different Series has attrs"
                        )
                    )
        else:
            if annotate == "left" and isinstance(left, type(right)):
                request.applymarker(
                    pytest.mark.xfail(
                        reason=f"{all_binary_operators} doesn't work when left has "
                        f"attrs and both are {type(left)}"
                    )
                )
            if not isinstance(left, type(right)):
                if annotate == "right" and isinstance(right, pd.Series):
                    request.applymarker(
                        pytest.mark.xfail(
                            reason=f"{all_binary_operators} doesn't work when the "
                            "objects are different Series has attrs"
                        )
                    )
                elif annotate == "left" and isinstance(left, pd.Series):
                    request.applymarker(
                        pytest.mark.xfail(
                            reason=f"{all_binary_operators} doesn't work when the "
                            "objects are different Series has attrs"
                        )
                    )
    if annotate in {"left", "both"} and not isinstance(left, int):
        left.attrs = {"a": 1}
    if annotate in {"right", "both"} and not isinstance(right, int):
        right.attrs = {"a": 1}

    is_cmp = all_binary_operators in [
        operator.eq,
        operator.ne,
        operator.gt,
        operator.ge,
        operator.lt,
        operator.le,
    ]
    if is_cmp and isinstance(left, pd.DataFrame) and isinstance(right, pd.Series):
        # in 2.0 silent alignment on comparisons was removed xref GH#28759
        left, right = left.align(right, axis=1, copy=False)
    elif is_cmp and isinstance(left, pd.Series) and isinstance(right, pd.DataFrame):
        right, left = right.align(left, axis=1, copy=False)

    result = all_binary_operators(left, right)
    assert result.attrs == {"a": 1}


# ----------------------------------------------------------------------------
# Accessors


# ``attrs`` must propagate through every ``.str`` accessor method.
@pytest.mark.parametrize(
    "method",
    [
        operator.methodcaller("capitalize"),
        operator.methodcaller("casefold"),
        operator.methodcaller("cat", ["a"]),
        operator.methodcaller("contains", "a"),
        operator.methodcaller("count", "a"),
        operator.methodcaller("encode", "utf-8"),
        operator.methodcaller("endswith", "a"),
        operator.methodcaller("extract", r"(\w)(\d)"),
        operator.methodcaller("extract", r"(\w)(\d)", expand=False),
        operator.methodcaller("find", "a"),
        operator.methodcaller("findall", "a"),
        operator.methodcaller("get", 0),
        operator.methodcaller("index", "a"),
        operator.methodcaller("len"),
        operator.methodcaller("ljust", 4),
        operator.methodcaller("lower"),
        operator.methodcaller("lstrip"),
        operator.methodcaller("match", r"\w"),
        operator.methodcaller("normalize", "NFC"),
        operator.methodcaller("pad", 4),
        operator.methodcaller("partition", "a"),
        operator.methodcaller("repeat", 2),
        operator.methodcaller("replace", "a", "b"),
        operator.methodcaller("rfind", "a"),
        operator.methodcaller("rindex", "a"),
        operator.methodcaller("rjust", 4),
        operator.methodcaller("rpartition", "a"),
        operator.methodcaller("rstrip"),
        operator.methodcaller("slice", 4),
        operator.methodcaller("slice_replace", 1, repl="a"),
        operator.methodcaller("startswith", "a"),
        operator.methodcaller("strip"),
        operator.methodcaller("swapcase"),
        operator.methodcaller("translate", {"a": "b"}),
        operator.methodcaller("upper"),
        operator.methodcaller("wrap", 4),
        operator.methodcaller("zfill", 4),
        operator.methodcaller("isalnum"),
        operator.methodcaller("isalpha"),
        operator.methodcaller("isdigit"),
        operator.methodcaller("isspace"),
        operator.methodcaller("islower"),
        operator.methodcaller("isupper"),
        operator.methodcaller("istitle"),
        operator.methodcaller("isnumeric"),
        operator.methodcaller("isdecimal"),
        operator.methodcaller("get_dummies"),
    ],
    ids=idfn,
)
def test_string_method(method):
    s = pd.Series(["a1"])
    s.attrs = {"a": 1}
    result = method(s.str)
    assert result.attrs == {"a": 1}


# ``attrs`` must propagate through ``.dt`` accessor *methods*.
@pytest.mark.parametrize(
    "method",
    [
        operator.methodcaller("to_period"),
        operator.methodcaller("tz_localize", "CET"),
        operator.methodcaller("normalize"),
        operator.methodcaller("strftime", "%Y"),
        operator.methodcaller("round", "h"),
        operator.methodcaller("floor", "h"),
        operator.methodcaller("ceil", "h"),
        operator.methodcaller("month_name"),
        operator.methodcaller("day_name"),
    ],
    ids=idfn,
)
def test_datetime_method(method):
    s = pd.Series(pd.date_range("2000", periods=4))
    s.attrs = {"a": 1}
    result = method(s.dt)
    assert result.attrs == {"a": 1}


# ``attrs`` must propagate through ``.dt`` accessor *properties*.
@pytest.mark.parametrize(
    "attr",
    [
        "date",
        "time",
        "timetz",
        "year",
        "month",
        "day",
        "hour",
        "minute",
        "second",
        "microsecond",
        "nanosecond",
        "dayofweek",
        "day_of_week",
        "dayofyear",
        "day_of_year",
        "quarter",
        "is_month_start",
        "is_month_end",
        "is_quarter_start",
        "is_quarter_end",
        "is_year_start",
        "is_year_end",
        "is_leap_year",
        "daysinmonth",
        "days_in_month",
    ],
)
def test_datetime_property(attr):
    s = pd.Series(pd.date_range("2000", periods=4))
    s.attrs = {"a": 1}
    result = getattr(s.dt, attr)
    assert result.attrs == {"a": 1}


@pytest.mark.parametrize(
    "attr", ["days", "seconds", "microseconds", "nanoseconds", "components"]
)
def test_timedelta_property(attr):
    s = pd.Series(pd.timedelta_range("2000", periods=4))
    s.attrs = {"a": 1}
    result = getattr(s.dt, attr)
    assert result.attrs == {"a": 1}


@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")])
def test_timedelta_methods(method):
    s = pd.Series(pd.timedelta_range("2000", periods=4))
    s.attrs = {"a": 1}
    result = method(s.dt)
    assert result.attrs == {"a": 1}


# ``.cat`` accessor methods do not yet propagate attrs, hence the xfail mark.
@pytest.mark.parametrize(
    "method",
    [
        operator.methodcaller("add_categories", ["c"]),
        operator.methodcaller("as_ordered"),
        operator.methodcaller("as_unordered"),
        lambda x: getattr(x, "codes"),
        operator.methodcaller("remove_categories", "a"),
        operator.methodcaller("remove_unused_categories"),
        operator.methodcaller("rename_categories", {"a": "A", "b": "B"}),
        operator.methodcaller("reorder_categories", ["b", "a"]),
        operator.methodcaller("set_categories", ["A", "B"]),
    ],
)
@not_implemented_mark
def test_categorical_accessor(method):
    s = pd.Series(["a", "b"], dtype="category")
    s.attrs = {"a": 1}
    result = method(s.cat)
    assert result.attrs == {"a": 1}


# ----------------------------------------------------------------------------
# Groupby


@pytest.mark.parametrize(
    "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})]
)
@pytest.mark.parametrize(
    "method",
    [
        operator.methodcaller("sum"),
        lambda x: x.apply(lambda y: y),
        lambda x: x.agg("sum"),
        lambda x: x.agg("mean"),
        lambda x: x.agg("median"),
    ],
)
def test_groupby_finalize(obj, method):
    # These groupby reductions are expected to keep ``attrs``.
    obj.attrs = {"a": 1}
    result = method(obj.groupby([0, 0], group_keys=False))
    assert result.attrs == {"a": 1}


# These groupby paths do not yet propagate attrs, hence the xfail mark.
@pytest.mark.parametrize(
    "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})]
)
@pytest.mark.parametrize(
    "method",
    [
        lambda x: x.agg(["sum", "count"]),
        lambda x: x.agg("std"),
        lambda x: x.agg("var"),
        lambda x: x.agg("sem"),
        lambda x: x.agg("size"),
        lambda x: x.agg("ohlc"),
    ],
)
@not_implemented_mark
def test_groupby_finalize_not_implemented(obj, method):
    obj.attrs = {"a": 1}
    result = method(obj.groupby([0, 0]))
    assert result.attrs == {"a": 1}


def test_finalize_frame_series_name():
    # https://github.com/pandas-dev/pandas/pull/37186/files#r506978889
    # ensure we don't copy the column `name` to the Series.
    df = pd.DataFrame({"name": [1, 2]})
    result = pd.Series([1, 2]).__finalize__(df)
    assert result.name is None
# DataFrame-specific NDFrame behavior: axis naming, bool() coercion,
# metadata (``_metadata``/``__finalize__``) propagation, attribute vs.
# column-name collisions, and keyword-argument validation.
from copy import deepcopy
from operator import methodcaller

import numpy as np
import pytest

import pandas as pd
from pandas import (
    DataFrame,
    MultiIndex,
    Series,
    date_range,
)
import pandas._testing as tm


class TestDataFrame:
    @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
    def test_set_axis_name(self, func):
        # Both methods return a renamed copy and leave the original untouched.
        df = DataFrame([[1, 2], [3, 4]])

        result = methodcaller(func, "foo")(df)
        assert df.index.name is None
        assert result.index.name == "foo"

        result = methodcaller(func, "cols", axis=1)(df)
        assert df.columns.name is None
        assert result.columns.name == "cols"

    @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
    def test_set_axis_name_mi(self, func):
        # MultiIndex variant: only the targeted axis' level names change.
        df = DataFrame(
            np.empty((3, 3)),
            index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]),
            columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
        )

        level_names = ["L1", "L2"]

        result = methodcaller(func, level_names)(df)
        assert result.index.names == level_names
        assert result.columns.names == [None, None]

        result = methodcaller(func, level_names, axis=1)(df)
        assert result.columns.names == ["L1", "L2"]
        assert result.index.names == [None, None]

    def test_nonzero_single_element(self):
        # allow single item via bool method
        msg_warn = (
            "DataFrame.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        df = DataFrame([[True]])
        df1 = DataFrame([[False]])
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            assert df.bool()

        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            assert not df1.bool()

        # Multi-element frames are ambiguous for both bool() and .bool().
        df = DataFrame([[False, False]])
        msg_err = "The truth value of a DataFrame is ambiguous"
        with pytest.raises(ValueError, match=msg_err):
            bool(df)

        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            with pytest.raises(ValueError, match=msg_err):
                df.bool()

    def test_metadata_propagation_indiv_groupby(self):
        # groupby
        df = DataFrame(
            {
                "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
                "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
                "C": np.random.default_rng(2).standard_normal(8),
                "D": np.random.default_rng(2).standard_normal(8),
            }
        )
        result = df.groupby("A").sum()
        tm.assert_metadata_equivalent(df, result)

    def test_metadata_propagation_indiv_resample(self):
        # resample
        df = DataFrame(
            np.random.default_rng(2).standard_normal((1000, 2)),
            index=date_range("20130101", periods=1000, freq="s"),
        )
        result = df.resample("1min")
        tm.assert_metadata_equivalent(df, result)

    def test_metadata_propagation_indiv(self, monkeypatch):
        # merging with override
        # GH 6923
        # Custom ``__finalize__`` combines the ``filename`` metadata of the
        # inputs for merge ("a|b") and concat ("a+b"); otherwise it copies it.

        def finalize(self, other, method=None, **kwargs):
            for name in self._metadata:
                if method == "merge":
                    left, right = other.left, other.right
                    value = getattr(left, name, "") + "|" + getattr(right, name, "")
                    object.__setattr__(self, name, value)
                elif method == "concat":
                    value = "+".join(
                        [getattr(o, name) for o in other.objs if getattr(o, name, None)]
                    )
                    object.__setattr__(self, name, value)
                else:
                    object.__setattr__(self, name, getattr(other, name, ""))

            return self

        with monkeypatch.context() as m:
            m.setattr(DataFrame, "_metadata", ["filename"])
            m.setattr(DataFrame, "__finalize__", finalize)

            df1 = DataFrame(
                np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"]
            )
            df2 = DataFrame(
                np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"]
            )
            # NOTE(review): redundant with the ``m.setattr`` above, and this
            # direct assignment is not undone when the monkeypatch context
            # exits — looks like it leaks onto the class; confirm intent.
            DataFrame._metadata = ["filename"]
            df1.filename = "fname1.csv"
            df2.filename = "fname2.csv"

            result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
            assert result.filename == "fname1.csv|fname2.csv"

            # concat
            # GH#6927
            df1 = DataFrame(
                np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab")
            )
            df1.filename = "foo"

            result = pd.concat([df1, df1])
            assert result.filename == "foo+foo"

    def test_set_attribute(self):
        # Test for consistent setattr behavior when an attribute and a column
        # have the same name (Issue #8994)
        df = DataFrame({"x": [1, 2, 3]})

        # ``df.y = ...`` sets an instance attribute; ``df["y"] = ...`` sets a
        # column — the two must stay independent.
        df.y = 2
        df["y"] = [2, 4, 6]
        df.y = 5

        assert df.y == 5
        tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y"))

    def test_deepcopy_empty(self):
        # This test covers empty frame copying with non-empty column sets
        # as reported in issue GH15370
        empty_frame = DataFrame(data=[], index=[], columns=["A"])
        empty_frame_copy = deepcopy(empty_frame)

        tm.assert_frame_equal(empty_frame_copy, empty_frame)


# formerly in Generic but only test DataFrame
class TestDataFrame2:
    @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
    def test_validate_bool_args(self, value):
        # Non-bool ``inplace`` arguments must be rejected with ValueError.
        df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

        msg = 'For argument "inplace" expected type bool, received type'
        with pytest.raises(ValueError, match=msg):
            df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value)

        with pytest.raises(ValueError, match=msg):
            df.copy().drop("a", axis=1, inplace=value)

        with pytest.raises(ValueError, match=msg):
            df.copy().fillna(value=0, inplace=value)

        with pytest.raises(ValueError, match=msg):
            df.copy().replace(to_replace=1, value=7, inplace=value)

        with pytest.raises(ValueError, match=msg):
            df.copy().interpolate(inplace=value)

        with pytest.raises(ValueError, match=msg):
            df.copy()._where(cond=df.a > 2, inplace=value)

        with pytest.raises(ValueError, match=msg):
            df.copy().mask(cond=df.a > 2, inplace=value)

    def test_unexpected_keyword(self):
        # GH8597
        # Misspelled keywords (e.g. ``in_place``) must raise TypeError rather
        # than be silently ignored.
        df = DataFrame(
            np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"]
        )
        ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
        ts = df["joe"].copy()
        ts[2] = np.nan

        msg = "unexpected keyword"
        with pytest.raises(TypeError, match=msg):
            df.drop("joe", axis=1, in_place=True)

        with pytest.raises(TypeError, match=msg):
            df.reindex([1, 0], inplace=True)

        # Categorical.fillna takes no ``inplace`` keyword at all.
        with pytest.raises(TypeError, match=msg):
            ca.fillna(0, inplace=True)

        with pytest.raises(TypeError, match=msg):
            ts.fillna(0, in_place=True)
# Shared NDFrame behavior exercised against both Series and DataFrame via the
# ``frame_or_series`` fixture: construction helpers, truthiness, metadata,
# squeeze/transpose/take, and assorted keyword validation.
from copy import (
    copy,
    deepcopy,
)

import numpy as np
import pytest

from pandas.core.dtypes.common import is_scalar

from pandas import (
    DataFrame,
    Index,
    Series,
    date_range,
)
import pandas._testing as tm

# ----------------------------------------------------------------------
# Generic types test cases


def construct(box, shape, value=None, dtype=None, **kwargs):
    """
    Construct a ``box`` (Series or DataFrame) of the given shape.

    If ``value`` is a scalar it fills the object with that value; the special
    scalar ``"empty"`` builds an empty float64 object (dropping the info-axis
    kwarg). If ``value`` is an array it is tiled to fit ``shape``; otherwise
    the object is filled with seeded random normals.
    """
    if isinstance(shape, int):
        # A bare int means "this length along every axis of the box".
        shape = tuple([shape] * box._AXIS_LEN)
    if value is not None:
        if is_scalar(value):
            if value == "empty":
                arr = None
                dtype = np.float64

                # remove the info axis
                kwargs.pop(box._info_axis_name, None)
            else:
                arr = np.empty(shape, dtype=dtype)
                arr.fill(value)
        else:
            # Repeat the flattened value array to cover the requested shape;
            # the total size must be an exact multiple of the value's length.
            fshape = np.prod(shape)
            arr = value.ravel()
            new_shape = fshape / arr.shape[0]
            if fshape % arr.shape[0] != 0:
                raise Exception("invalid value passed in construct")

            arr = np.repeat(arr, new_shape).reshape(shape)
    else:
        arr = np.random.default_rng(2).standard_normal(shape)
    return box(arr, dtype=dtype, **kwargs)


class TestGeneric:
    @pytest.mark.parametrize(
        "func",
        [
            str.lower,
            {x: x.lower() for x in list("ABCD")},
            Series({x: x.lower() for x in list("ABCD")}),
        ],
    )
    def test_rename(self, frame_or_series, func):
        # single axis
        # ``rename`` must accept a callable, a dict, or a Series mapper.
        idx = list("ABCD")

        for axis in frame_or_series._AXIS_ORDERS:
            kwargs = {axis: idx}
            obj = construct(frame_or_series, 4, **kwargs)

            # rename a single axis
            result = obj.rename(**{axis: func})
            expected = obj.copy()
            setattr(expected, axis, list("abcd"))
            tm.assert_equal(result, expected)

    def test_get_numeric_data(self, frame_or_series):
        n = 4
        kwargs = {
            frame_or_series._get_axis_name(i): list(range(n))
            for i in range(frame_or_series._AXIS_LEN)
        }

        # get the numeric data
        o = construct(frame_or_series, n, **kwargs)
        result = o._get_numeric_data()
        tm.assert_equal(result, o)

        # non-inclusion
        result = o._get_bool_data()
        expected = construct(frame_or_series, n, value="empty", **kwargs)
        if isinstance(o, DataFrame):
            # preserve columns dtype
            expected.columns = o.columns[:0]
        # https://github.com/pandas-dev/pandas/issues/50862
        tm.assert_equal(result.reset_index(drop=True), expected)

        # get the bool data
        arr = np.array([True, True, False, True])
        o = construct(frame_or_series, n, value=arr, **kwargs)
        result = o._get_numeric_data()
        tm.assert_equal(result, o)

    def test_nonzero(self, frame_or_series):
        # GH 4633
        # look at the boolean/nonzero behavior for objects
        # bool() on any multi-valued (or empty) object must raise ValueError.
        obj = construct(frame_or_series, shape=4)
        msg = f"The truth value of a {frame_or_series.__name__} is ambiguous"
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        obj = construct(frame_or_series, shape=4, value=1)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        obj = construct(frame_or_series, shape=4, value=np.nan)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        # empty
        obj = construct(frame_or_series, shape=0)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        # invalid behaviors

        obj1 = construct(frame_or_series, shape=4, value=1)
        obj2 = construct(frame_or_series, shape=4, value=1)

        with pytest.raises(ValueError, match=msg):
            if obj1:
                pass

        with pytest.raises(ValueError, match=msg):
            obj1 and obj2
        with pytest.raises(ValueError, match=msg):
            obj1 or obj2
        with pytest.raises(ValueError, match=msg):
            not obj1

    def test_frame_or_series_compound_dtypes(self, frame_or_series):
        # see gh-5191
        # Compound dtypes should raise NotImplementedError.

        def f(dtype):
            return construct(frame_or_series, shape=3, value=1, dtype=dtype)

        msg = (
            "compound dtypes are not implemented "
            f"in the {frame_or_series.__name__} constructor"
        )

        with pytest.raises(NotImplementedError, match=msg):
            f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])

        # these work (though results may be unexpected)
        f("int64")
        f("float64")
        f("M8[ns]")

    def test_metadata_propagation(self, frame_or_series):
        # check that the metadata matches up on the resulting ops

        o = construct(frame_or_series, shape=3)
        o.name = "foo"
        o2 = construct(frame_or_series, shape=3)
        o2.name = "bar"

        # ----------
        # preserving
        # ----------

        # simple ops with scalars
        for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
            result = getattr(o, op)(1)
            tm.assert_metadata_equivalent(o, result)

        # ops with like
        for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
            result = getattr(o, op)(o)
            tm.assert_metadata_equivalent(o, result)

        # simple boolean
        for op in ["__eq__", "__le__", "__ge__"]:
            v1 = getattr(o, op)(o)
            tm.assert_metadata_equivalent(o, v1)
            tm.assert_metadata_equivalent(o, v1 & v1)
            tm.assert_metadata_equivalent(o, v1 | v1)

        # combine_first
        result = o.combine_first(o2)
        tm.assert_metadata_equivalent(o, result)

        # ---------------------------
        # non-preserving (by default)
        # ---------------------------

        # add non-like
        # Single-argument form asserts the result has *no* metadata set.
        result = o + o2
        tm.assert_metadata_equivalent(result)

        # simple boolean
        for op in ["__eq__", "__le__", "__ge__"]:
            # this is a name matching op
            v1 = getattr(o, op)(o)
            v2 = getattr(o, op)(o2)
            tm.assert_metadata_equivalent(v2)
            tm.assert_metadata_equivalent(v1 & v2)
            tm.assert_metadata_equivalent(v1 | v2)

    def test_size_compat(self, frame_or_series):
        # GH8846
        # size property should be defined
        o = construct(frame_or_series, shape=10)
        assert o.size == np.prod(o.shape)
        assert o.size == 10 ** len(o.axes)

    def test_split_compat(self, frame_or_series):
        # xref GH8846
        o = construct(frame_or_series, shape=10)
        # np.array_split goes through the deprecated ``swapaxes`` path.
        with tm.assert_produces_warning(
            FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False
        ):
            assert len(np.array_split(o, 5)) == 5
            assert len(np.array_split(o, 2)) == 2

    # See gh-12301
    def test_stat_unexpected_keyword(self, frame_or_series):
        obj = construct(frame_or_series, 5)
        starwars = "Star Wars"
        errmsg = "unexpected keyword"

        with pytest.raises(TypeError, match=errmsg):
            obj.max(epic=starwars)  # stat_function
        with pytest.raises(TypeError, match=errmsg):
            obj.var(epic=starwars)  # stat_function_ddof
        with pytest.raises(TypeError, match=errmsg):
            obj.sum(epic=starwars)  # cum_function
        with pytest.raises(TypeError, match=errmsg):
            obj.any(epic=starwars)  # logical_function

    @pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
    def test_api_compat(self, func, frame_or_series):
        # GH 12021
        # compat for __name__, __qualname__

        obj = construct(frame_or_series, 5)
        f = getattr(obj, func)
        assert f.__name__ == func
        assert f.__qualname__.endswith(func)

    def test_stat_non_defaults_args(self, frame_or_series):
        # numpy-protocol ``out=`` argument is explicitly unsupported.
        obj = construct(frame_or_series, 5)
        out = np.array([0])
        errmsg = "the 'out' parameter is not supported"

        with pytest.raises(ValueError, match=errmsg):
            obj.max(out=out)  # stat_function
        with pytest.raises(ValueError, match=errmsg):
            obj.var(out=out)  # stat_function_ddof
        with pytest.raises(ValueError, match=errmsg):
            obj.sum(out=out)  # cum_function
        with pytest.raises(ValueError, match=errmsg):
            obj.any(out=out)  # logical_function

    def test_truncate_out_of_bounds(self, frame_or_series):
        # GH11382
        # Out-of-range before/after bounds must be a no-op, not an error.

        # small
        shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1))
        small = construct(frame_or_series, shape, dtype="int8", value=1)
        tm.assert_equal(small.truncate(), small)
        tm.assert_equal(small.truncate(before=0, after=3e3), small)
        tm.assert_equal(small.truncate(before=-1, after=2e3), small)

        # big
        shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1))
        big = construct(frame_or_series, shape, dtype="int8", value=1)
        tm.assert_equal(big.truncate(), big)
        tm.assert_equal(big.truncate(before=0, after=3e6), big)
        tm.assert_equal(big.truncate(before=-1, after=2e6), big)

    @pytest.mark.parametrize(
        "func",
        [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
    )
    @pytest.mark.parametrize("shape", [0, 1, 2])
    def test_copy_and_deepcopy(self, frame_or_series, shape, func):
        # GH 15444
        obj = construct(frame_or_series, shape)
        obj_copy = func(obj)
        assert obj_copy is not obj
        tm.assert_equal(obj_copy, obj)

    def test_data_deprecated(self, frame_or_series):
        obj = frame_or_series()
        msg = "(Series|DataFrame)._data is deprecated"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            mgr = obj._data
        assert mgr is obj._mgr


class TestNDFrame:
    # tests that don't fit elsewhere

    @pytest.mark.parametrize(
        "ser",
        [
            Series(range(10), dtype=np.float64),
            Series([str(i) for i in range(10)], dtype=object),
        ],
    )
    def test_squeeze_series_noop(self, ser):
        # noop
        tm.assert_series_equal(ser.squeeze(), ser)

    def test_squeeze_frame_noop(self):
        # noop
        df = DataFrame(np.eye(2))
        tm.assert_frame_equal(df.squeeze(), df)

    def test_squeeze_frame_reindex(self):
        # squeezing
        # A one-column frame squeezes down to that column's Series.
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        ).reindex(columns=["A"])
        tm.assert_series_equal(df.squeeze(), df["A"])

    def test_squeeze_0_len_dim(self):
        # don't fail with 0 length dimensions GH11229 & GH8999
        empty_series = Series([], name="five", dtype=np.float64)
        empty_frame = DataFrame([empty_series])
        tm.assert_series_equal(empty_series, empty_series.squeeze())
        tm.assert_series_equal(empty_series, empty_frame.squeeze())

    def test_squeeze_axis(self):
        # axis argument
        df = DataFrame(
            np.random.default_rng(2).standard_normal((1, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=1, freq="B"),
        ).iloc[:, :1]
        assert df.shape == (1, 1)
        tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
        tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
        tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
        tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
        assert df.squeeze() == df.iloc[0, 0]
        msg = "No axis named 2 for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            df.squeeze(axis=2)
        msg = "No axis named x for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            df.squeeze(axis="x")

    def test_squeeze_axis_len_3(self):
        # Squeezing an axis of length > 1 is a no-op.
        df = DataFrame(
            np.random.default_rng(2).standard_normal((3, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=3, freq="B"),
        )
        tm.assert_frame_equal(df.squeeze(axis=0), df)

    def test_numpy_squeeze(self):
        s = Series(range(2), dtype=np.float64)
        tm.assert_series_equal(np.squeeze(s), s)

        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        ).reindex(columns=["A"])
        tm.assert_series_equal(np.squeeze(df), df["A"])

    @pytest.mark.parametrize(
        "ser",
        [
            Series(range(10), dtype=np.float64),
            Series([str(i) for i in range(10)], dtype=object),
        ],
    )
    def test_transpose_series(self, ser):
        # calls implementation in pandas/core/base.py
        tm.assert_series_equal(ser.transpose(), ser)

    def test_transpose_frame(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        tm.assert_frame_equal(df.transpose().transpose(), df)

    def test_numpy_transpose(self, frame_or_series):
        obj = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        obj = tm.get_obj(obj, frame_or_series)

        if frame_or_series is Series:
            # 1D -> np.transpose is no-op
            tm.assert_series_equal(np.transpose(obj), obj)

        # round-trip preserved
        tm.assert_equal(np.transpose(np.transpose(obj)), obj)

        msg = "the 'axes' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.transpose(obj, axes=1)

    @pytest.mark.parametrize(
        "ser",
        [
            Series(range(10), dtype=np.float64),
            Series([str(i) for i in range(10)], dtype=object),
        ],
    )
    def test_take_series(self, ser):
        # ``take`` must honor negative (from-the-end) positions.
        indices = [1, 5, -2, 6, 3, -1]
        out = ser.take(indices)
        expected = Series(
            data=ser.values.take(indices),
            index=ser.index.take(indices),
            dtype=ser.dtype,
        )
        tm.assert_series_equal(out, expected)

    def test_take_frame(self):
        indices = [1, 5, -2, 6, 3, -1]
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        out = df.take(indices)
        expected = DataFrame(
            data=df.values.take(indices, axis=0),
            index=df.index.take(indices),
            columns=df.columns,
        )
        tm.assert_frame_equal(out, expected)

    def test_take_invalid_kwargs(self, frame_or_series):
        indices = [-3, 2, 0, 1]

        obj = DataFrame(range(5))
        obj = tm.get_obj(obj, frame_or_series)

        msg = r"take\(\) got an unexpected keyword argument 'foo'"
        with pytest.raises(TypeError, match=msg):
            obj.take(indices, foo=2)

        msg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            obj.take(indices, out=indices)

        msg = "the 'mode' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            obj.take(indices, mode="clip")

    def test_axis_classmethods(self, frame_or_series):
        # Axis-resolution helpers must agree between instance and class.
        box = frame_or_series
        obj = box(dtype=object)
        values = box._AXIS_TO_AXIS_NUMBER.keys()
        for v in values:
            assert obj._get_axis_number(v) == box._get_axis_number(v)
            assert obj._get_axis_name(v) == box._get_axis_name(v)
            assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)

    def test_flags_identity(self, frame_or_series):
        # ``flags`` is cached per object and not shared across copies.
        obj = Series([1, 2])
        if frame_or_series is DataFrame:
            obj = obj.to_frame()

        assert obj.flags is obj.flags
        obj2 = obj.copy()
        assert obj2.flags is not obj.flags

    def test_bool_dep(self) -> None:
        # GH-51749
        msg_warn = (
            "DataFrame.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            DataFrame({"col": [False]}).bool()
import pytest\n\nfrom pandas.core.dtypes.missing import array_equivalent\n\nimport pandas as pd\n\n\n# Fixtures\n# ========\n@pytest.fixture\ndef df():\n """DataFrame with columns 'L1', 'L2', and 'L3'"""\n return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]})\n\n\n@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]])\ndef df_levels(request, df):\n """DataFrame with columns or index levels 'L1', 'L2', and 'L3'"""\n levels = request.param\n\n if levels:\n df = df.set_index(levels)\n\n return df\n\n\n@pytest.fixture\ndef df_ambig(df):\n """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'"""\n df = df.set_index(["L1", "L2"])\n\n df["L1"] = df["L3"]\n\n return df\n\n\n@pytest.fixture\ndef df_duplabels(df):\n """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'"""\n df = df.set_index(["L1"])\n df = pd.concat([df, df["L2"]], axis=1)\n\n return df\n\n\n# Test is label/level reference\n# =============================\ndef get_labels_levels(df_levels):\n expected_labels = list(df_levels.columns)\n expected_levels = [name for name in df_levels.index.names if name is not None]\n return expected_labels, expected_levels\n\n\ndef assert_label_reference(frame, labels, axis):\n for label in labels:\n assert frame._is_label_reference(label, axis=axis)\n assert not frame._is_level_reference(label, axis=axis)\n assert frame._is_label_or_level_reference(label, axis=axis)\n\n\ndef assert_level_reference(frame, levels, axis):\n for level in levels:\n assert frame._is_level_reference(level, axis=axis)\n assert not frame._is_label_reference(level, axis=axis)\n assert frame._is_label_or_level_reference(level, axis=axis)\n\n\n# DataFrame\n# ---------\ndef test_is_level_or_label_reference_df_simple(df_levels, axis):\n axis = df_levels._get_axis_number(axis)\n # Compute expected labels and levels\n expected_labels, expected_levels = get_labels_levels(df_levels)\n\n # Transpose frame if axis == 1\n if axis == 1:\n df_levels = 
df_levels.T\n\n # Perform checks\n assert_level_reference(df_levels, expected_levels, axis=axis)\n assert_label_reference(df_levels, expected_labels, axis=axis)\n\n\ndef test_is_level_reference_df_ambig(df_ambig, axis):\n axis = df_ambig._get_axis_number(axis)\n\n # Transpose frame if axis == 1\n if axis == 1:\n df_ambig = df_ambig.T\n\n # df has both an on-axis level and off-axis label named L1\n # Therefore L1 should reference the label, not the level\n assert_label_reference(df_ambig, ["L1"], axis=axis)\n\n # df has an on-axis level named L2 and it is not ambiguous\n # Therefore L2 is an level reference\n assert_level_reference(df_ambig, ["L2"], axis=axis)\n\n # df has a column named L3 and it not an level reference\n assert_label_reference(df_ambig, ["L3"], axis=axis)\n\n\n# Series\n# ------\ndef test_is_level_reference_series_simple_axis0(df):\n # Make series with L1 as index\n s = df.set_index("L1").L2\n assert_level_reference(s, ["L1"], axis=0)\n assert not s._is_level_reference("L2")\n\n # Make series with L1 and L2 as index\n s = df.set_index(["L1", "L2"]).L3\n assert_level_reference(s, ["L1", "L2"], axis=0)\n assert not s._is_level_reference("L3")\n\n\ndef test_is_level_reference_series_axis1_error(df):\n # Make series with L1 as index\n s = df.set_index("L1").L2\n\n with pytest.raises(ValueError, match="No axis named 1"):\n s._is_level_reference("L1", axis=1)\n\n\n# Test _check_label_or_level_ambiguity_df\n# =======================================\n\n\n# DataFrame\n# ---------\ndef test_check_label_or_level_ambiguity_df(df_ambig, axis):\n axis = df_ambig._get_axis_number(axis)\n # Transpose frame if axis == 1\n if axis == 1:\n df_ambig = df_ambig.T\n msg = "'L1' is both a column level and an index label"\n\n else:\n msg = "'L1' is both an index level and a column label"\n # df_ambig has both an on-axis level and off-axis label named L1\n # Therefore, L1 is ambiguous.\n with pytest.raises(ValueError, match=msg):\n 
df_ambig._check_label_or_level_ambiguity("L1", axis=axis)\n\n # df_ambig has an on-axis level named L2,, and it is not ambiguous.\n df_ambig._check_label_or_level_ambiguity("L2", axis=axis)\n\n # df_ambig has an off-axis label named L3, and it is not ambiguous\n assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis)\n\n\n# Series\n# ------\ndef test_check_label_or_level_ambiguity_series(df):\n # A series has no columns and therefore references are never ambiguous\n\n # Make series with L1 as index\n s = df.set_index("L1").L2\n s._check_label_or_level_ambiguity("L1", axis=0)\n s._check_label_or_level_ambiguity("L2", axis=0)\n\n # Make series with L1 and L2 as index\n s = df.set_index(["L1", "L2"]).L3\n s._check_label_or_level_ambiguity("L1", axis=0)\n s._check_label_or_level_ambiguity("L2", axis=0)\n s._check_label_or_level_ambiguity("L3", axis=0)\n\n\ndef test_check_label_or_level_ambiguity_series_axis1_error(df):\n # Make series with L1 as index\n s = df.set_index("L1").L2\n\n with pytest.raises(ValueError, match="No axis named 1"):\n s._check_label_or_level_ambiguity("L1", axis=1)\n\n\n# Test _get_label_or_level_values\n# ===============================\ndef assert_label_values(frame, labels, axis):\n axis = frame._get_axis_number(axis)\n for label in labels:\n if axis == 0:\n expected = frame[label]._values\n else:\n expected = frame.loc[label]._values\n\n result = frame._get_label_or_level_values(label, axis=axis)\n assert array_equivalent(expected, result)\n\n\ndef assert_level_values(frame, levels, axis):\n axis = frame._get_axis_number(axis)\n for level in levels:\n if axis == 0:\n expected = frame.index.get_level_values(level=level)._values\n else:\n expected = frame.columns.get_level_values(level=level)._values\n\n result = frame._get_label_or_level_values(level, axis=axis)\n assert array_equivalent(expected, result)\n\n\n# DataFrame\n# ---------\ndef test_get_label_or_level_values_df_simple(df_levels, axis):\n # Compute expected labels and 
levels\n expected_labels, expected_levels = get_labels_levels(df_levels)\n\n axis = df_levels._get_axis_number(axis)\n # Transpose frame if axis == 1\n if axis == 1:\n df_levels = df_levels.T\n\n # Perform checks\n assert_label_values(df_levels, expected_labels, axis=axis)\n assert_level_values(df_levels, expected_levels, axis=axis)\n\n\ndef test_get_label_or_level_values_df_ambig(df_ambig, axis):\n axis = df_ambig._get_axis_number(axis)\n # Transpose frame if axis == 1\n if axis == 1:\n df_ambig = df_ambig.T\n\n # df has an on-axis level named L2, and it is not ambiguous.\n assert_level_values(df_ambig, ["L2"], axis=axis)\n\n # df has an off-axis label named L3, and it is not ambiguous.\n assert_label_values(df_ambig, ["L3"], axis=axis)\n\n\ndef test_get_label_or_level_values_df_duplabels(df_duplabels, axis):\n axis = df_duplabels._get_axis_number(axis)\n # Transpose frame if axis == 1\n if axis == 1:\n df_duplabels = df_duplabels.T\n\n # df has unambiguous level 'L1'\n assert_level_values(df_duplabels, ["L1"], axis=axis)\n\n # df has unique label 'L3'\n assert_label_values(df_duplabels, ["L3"], axis=axis)\n\n # df has duplicate labels 'L2'\n if axis == 0:\n expected_msg = "The column label 'L2' is not unique"\n else:\n expected_msg = "The index label 'L2' is not unique"\n\n with pytest.raises(ValueError, match=expected_msg):\n assert_label_values(df_duplabels, ["L2"], axis=axis)\n\n\n# Series\n# ------\ndef test_get_label_or_level_values_series_axis0(df):\n # Make series with L1 as index\n s = df.set_index("L1").L2\n assert_level_values(s, ["L1"], axis=0)\n\n # Make series with L1 and L2 as index\n s = df.set_index(["L1", "L2"]).L3\n assert_level_values(s, ["L1", "L2"], axis=0)\n\n\ndef test_get_label_or_level_values_series_axis1_error(df):\n # Make series with L1 as index\n s = df.set_index("L1").L2\n\n with pytest.raises(ValueError, match="No axis named 1"):\n s._get_label_or_level_values("L1", axis=1)\n\n\n# Test _drop_labels_or_levels\n# 
===========================\ndef assert_labels_dropped(frame, labels, axis):\n axis = frame._get_axis_number(axis)\n for label in labels:\n df_dropped = frame._drop_labels_or_levels(label, axis=axis)\n\n if axis == 0:\n assert label in frame.columns\n assert label not in df_dropped.columns\n else:\n assert label in frame.index\n assert label not in df_dropped.index\n\n\ndef assert_levels_dropped(frame, levels, axis):\n axis = frame._get_axis_number(axis)\n for level in levels:\n df_dropped = frame._drop_labels_or_levels(level, axis=axis)\n\n if axis == 0:\n assert level in frame.index.names\n assert level not in df_dropped.index.names\n else:\n assert level in frame.columns.names\n assert level not in df_dropped.columns.names\n\n\n# DataFrame\n# ---------\ndef test_drop_labels_or_levels_df(df_levels, axis):\n # Compute expected labels and levels\n expected_labels, expected_levels = get_labels_levels(df_levels)\n\n axis = df_levels._get_axis_number(axis)\n # Transpose frame if axis == 1\n if axis == 1:\n df_levels = df_levels.T\n\n # Perform checks\n assert_labels_dropped(df_levels, expected_labels, axis=axis)\n assert_levels_dropped(df_levels, expected_levels, axis=axis)\n\n with pytest.raises(ValueError, match="not valid labels or levels"):\n df_levels._drop_labels_or_levels("L4", axis=axis)\n\n\n# Series\n# ------\ndef test_drop_labels_or_levels_series(df):\n # Make series with L1 as index\n s = df.set_index("L1").L2\n assert_levels_dropped(s, ["L1"], axis=0)\n\n with pytest.raises(ValueError, match="not valid labels or levels"):\n s._drop_labels_or_levels("L4", axis=0)\n\n # Make series with L1 and L2 as index\n s = df.set_index(["L1", "L2"]).L3\n assert_levels_dropped(s, ["L1", "L2"], axis=0)\n\n with pytest.raises(ValueError, match="not valid labels or levels"):\n s._drop_labels_or_levels("L4", axis=0)\n | .venv\Lib\site-packages\pandas\tests\generic\test_label_or_level_utils.py | test_label_or_level_utils.py | Python | 10,244 | 0.95 | 0.157738 | 0.26749 | 
node-utils | 837 | 2024-02-20T11:54:47.245070 | MIT | true | f3afa8962ffb80df549d33bd86104fd7 |
from operator import methodcaller\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n MultiIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSeries:\n @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"])\n def test_set_axis_name_mi(self, func):\n ser = Series(\n [11, 21, 31],\n index=MultiIndex.from_tuples(\n [("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"]\n ),\n )\n\n result = methodcaller(func, ["L1", "L2"])(ser)\n assert ser.index.name is None\n assert ser.index.names == ["l1", "l2"]\n assert result.index.name is None\n assert result.index.names, ["L1", "L2"]\n\n def test_set_axis_name_raises(self):\n ser = Series([1])\n msg = "No axis named 1 for object type Series"\n with pytest.raises(ValueError, match=msg):\n ser._set_axis_name(name="a", axis=1)\n\n def test_get_bool_data_preserve_dtype(self):\n ser = Series([True, False, True])\n result = ser._get_bool_data()\n tm.assert_series_equal(result, ser)\n\n def test_nonzero_single_element(self):\n # allow single item via bool method\n msg_warn = (\n "Series.bool is now deprecated and will be removed "\n "in future version of pandas"\n )\n ser = Series([True])\n ser1 = Series([False])\n with tm.assert_produces_warning(FutureWarning, match=msg_warn):\n assert ser.bool()\n with tm.assert_produces_warning(FutureWarning, match=msg_warn):\n assert not ser1.bool()\n\n @pytest.mark.parametrize("data", [np.nan, pd.NaT, True, False])\n def test_nonzero_single_element_raise_1(self, data):\n # single item nan to raise\n series = Series([data])\n\n msg = "The truth value of a Series is ambiguous"\n with pytest.raises(ValueError, match=msg):\n bool(series)\n\n @pytest.mark.parametrize("data", [np.nan, pd.NaT])\n def test_nonzero_single_element_raise_2(self, data):\n msg_warn = (\n "Series.bool is now deprecated and will be removed "\n "in future version of pandas"\n )\n msg_err = "bool cannot act on a non-boolean single element Series"\n series 
= Series([data])\n with tm.assert_produces_warning(FutureWarning, match=msg_warn):\n with pytest.raises(ValueError, match=msg_err):\n series.bool()\n\n @pytest.mark.parametrize("data", [(True, True), (False, False)])\n def test_nonzero_multiple_element_raise(self, data):\n # multiple bool are still an error\n msg_warn = (\n "Series.bool is now deprecated and will be removed "\n "in future version of pandas"\n )\n msg_err = "The truth value of a Series is ambiguous"\n series = Series([data])\n with pytest.raises(ValueError, match=msg_err):\n bool(series)\n with tm.assert_produces_warning(FutureWarning, match=msg_warn):\n with pytest.raises(ValueError, match=msg_err):\n series.bool()\n\n @pytest.mark.parametrize("data", [1, 0, "a", 0.0])\n def test_nonbool_single_element_raise(self, data):\n # single non-bool are an error\n msg_warn = (\n "Series.bool is now deprecated and will be removed "\n "in future version of pandas"\n )\n msg_err1 = "The truth value of a Series is ambiguous"\n msg_err2 = "bool cannot act on a non-boolean single element Series"\n series = Series([data])\n with pytest.raises(ValueError, match=msg_err1):\n bool(series)\n with tm.assert_produces_warning(FutureWarning, match=msg_warn):\n with pytest.raises(ValueError, match=msg_err2):\n series.bool()\n\n def test_metadata_propagation_indiv_resample(self):\n # resample\n ts = Series(\n np.random.default_rng(2).random(1000),\n index=date_range("20130101", periods=1000, freq="s"),\n name="foo",\n )\n result = ts.resample("1min").mean()\n tm.assert_metadata_equivalent(ts, result)\n\n result = ts.resample("1min").min()\n tm.assert_metadata_equivalent(ts, result)\n\n result = ts.resample("1min").apply(lambda x: x.sum())\n tm.assert_metadata_equivalent(ts, result)\n\n def test_metadata_propagation_indiv(self, monkeypatch):\n # check that the metadata matches up on the resulting ops\n\n ser = Series(range(3), range(3))\n ser.name = "foo"\n ser2 = Series(range(3), range(3))\n ser2.name = "bar"\n\n result = 
ser.T\n tm.assert_metadata_equivalent(ser, result)\n\n def finalize(self, other, method=None, **kwargs):\n for name in self._metadata:\n if method == "concat" and name == "filename":\n value = "+".join(\n [\n getattr(obj, name)\n for obj in other.objs\n if getattr(obj, name, None)\n ]\n )\n object.__setattr__(self, name, value)\n else:\n object.__setattr__(self, name, getattr(other, name, None))\n\n return self\n\n with monkeypatch.context() as m:\n m.setattr(Series, "_metadata", ["name", "filename"])\n m.setattr(Series, "__finalize__", finalize)\n\n ser.filename = "foo"\n ser2.filename = "bar"\n\n result = pd.concat([ser, ser2])\n assert result.filename == "foo+bar"\n assert result.name is None\n | .venv\Lib\site-packages\pandas\tests\generic\test_series.py | test_series.py | Python | 5,677 | 0.95 | 0.113208 | 0.044444 | python-kit | 224 | 2024-01-22T21:28:36.127713 | BSD-3-Clause | true | 6f7aa31fbac4203d4392658d8b13af31 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n DataFrame,\n MultiIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\npytest.importorskip("xarray")\n\n\nclass TestDataFrameToXArray:\n @pytest.fixture\n def df(self):\n return DataFrame(\n {\n "a": list("abcd"),\n "b": list(range(1, 5)),\n "c": np.arange(3, 7).astype("u1"),\n "d": np.arange(4.0, 8.0, dtype="float64"),\n "e": [True, False, True, False],\n "f": Categorical(list("abcd")),\n "g": date_range("20130101", periods=4),\n "h": date_range("20130101", periods=4, tz="US/Eastern"),\n }\n )\n\n def test_to_xarray_index_types(self, index_flat, df, using_infer_string):\n index = index_flat\n # MultiIndex is tested in test_to_xarray_with_multiindex\n if len(index) == 0:\n pytest.skip("Test doesn't make sense for empty index")\n\n from xarray import Dataset\n\n df.index = index[:4]\n df.index.name = "foo"\n df.columns.name = "bar"\n result = df.to_xarray()\n assert result.sizes["foo"] == 4\n assert len(result.coords) == 1\n assert len(result.data_vars) == 8\n tm.assert_almost_equal(list(result.coords.keys()), ["foo"])\n assert isinstance(result, Dataset)\n\n # idempotency\n # datetimes w/tz are preserved\n # column names are lost\n expected = df.copy()\n expected["f"] = expected["f"].astype(\n object if not using_infer_string else "str"\n )\n expected.columns.name = None\n tm.assert_frame_equal(result.to_dataframe(), expected)\n\n def test_to_xarray_empty(self, df):\n from xarray import Dataset\n\n df.index.name = "foo"\n result = df[0:0].to_xarray()\n assert result.sizes["foo"] == 0\n assert isinstance(result, Dataset)\n\n def test_to_xarray_with_multiindex(self, df, using_infer_string):\n from xarray import Dataset\n\n # MultiIndex\n df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])\n result = df.to_xarray()\n assert result.sizes["one"] == 1\n assert result.sizes["two"] == 4\n assert len(result.coords) == 2\n assert len(result.data_vars) == 8\n 
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])\n assert isinstance(result, Dataset)\n\n result = result.to_dataframe()\n expected = df.copy()\n expected["f"] = expected["f"].astype(\n object if not using_infer_string else "str"\n )\n expected.columns.name = None\n tm.assert_frame_equal(result, expected)\n\n\nclass TestSeriesToXArray:\n def test_to_xarray_index_types(self, index_flat):\n index = index_flat\n # MultiIndex is tested in test_to_xarray_with_multiindex\n\n from xarray import DataArray\n\n ser = Series(range(len(index)), index=index, dtype="int64")\n ser.index.name = "foo"\n result = ser.to_xarray()\n repr(result)\n assert len(result) == len(index)\n assert len(result.coords) == 1\n tm.assert_almost_equal(list(result.coords.keys()), ["foo"])\n assert isinstance(result, DataArray)\n\n # idempotency\n tm.assert_series_equal(result.to_series(), ser)\n\n def test_to_xarray_empty(self):\n from xarray import DataArray\n\n ser = Series([], dtype=object)\n ser.index.name = "foo"\n result = ser.to_xarray()\n assert len(result) == 0\n assert len(result.coords) == 1\n tm.assert_almost_equal(list(result.coords.keys()), ["foo"])\n assert isinstance(result, DataArray)\n\n def test_to_xarray_with_multiindex(self):\n from xarray import DataArray\n\n mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])\n ser = Series(range(6), dtype="int64", index=mi)\n result = ser.to_xarray()\n assert len(result) == 2\n tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])\n assert isinstance(result, DataArray)\n res = result.to_series()\n tm.assert_series_equal(res, ser)\n | .venv\Lib\site-packages\pandas\tests\generic\test_to_xarray.py | test_to_xarray.py | Python | 4,229 | 0.95 | 0.1 | 0.064815 | node-utils | 144 | 2025-06-25T13:42:38.098661 | Apache-2.0 | true | 35ca03586edc57199c8fbf1763deb5a7 |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_duplicate_labels.cpython-313.pyc | test_duplicate_labels.cpython-313.pyc | Other | 23,478 | 0.8 | 0 | 0.018868 | python-kit | 19 | 2024-05-07T05:36:33.158260 | GPL-3.0 | true | eb965b39338f0e9bab5fffda71e59cb3 |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_finalize.cpython-313.pyc | test_finalize.cpython-313.pyc | Other | 44,283 | 0.95 | 0 | 0.014218 | awesome-app | 866 | 2024-03-10T17:29:56.839654 | Apache-2.0 | true | 798af66762ab766bb96799fc5230f72d |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_frame.cpython-313.pyc | test_frame.cpython-313.pyc | Other | 13,318 | 0.8 | 0 | 0.006494 | vue-tools | 965 | 2023-10-19T17:52:21.415049 | Apache-2.0 | true | e6f56be8d3c083e1700847561c8e8a0c |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_generic.cpython-313.pyc | test_generic.cpython-313.pyc | Other | 29,100 | 0.8 | 0.016304 | 0.00551 | python-kit | 504 | 2024-06-01T11:37:05.168409 | MIT | true | 8bb052d9ed166146f13355b136db1eab |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_label_or_level_utils.cpython-313.pyc | test_label_or_level_utils.cpython-313.pyc | Other | 12,579 | 0.8 | 0 | 0 | python-kit | 410 | 2024-10-07T12:24:16.023526 | BSD-3-Clause | true | 63e87fc409ac38baa1ff32e71a2aa991 |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_series.cpython-313.pyc | test_series.cpython-313.pyc | Other | 9,761 | 0.8 | 0.007576 | 0.031008 | react-lib | 533 | 2025-01-15T11:27:10.499403 | GPL-3.0 | true | 8bf5a5fb144f0a35bbe8e6a54323f285 |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\test_to_xarray.cpython-313.pyc | test_to_xarray.cpython-313.pyc | Other | 7,651 | 0.95 | 0.015625 | 0 | vue-tools | 546 | 2023-08-02T00:09:16.217026 | GPL-3.0 | true | be627d9c002c70bcfbb0eba74874f01d |
\n\n | .venv\Lib\site-packages\pandas\tests\generic\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 195 | 0.7 | 0 | 0 | node-utils | 715 | 2023-10-09T14:05:45.834207 | BSD-3-Clause | true | db007a78c569ebd38e3fda59c16c874b |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n date_range,\n)\nfrom pandas.core.groupby.base import (\n reduction_kernels,\n transformation_kernels,\n)\n\n\n@pytest.fixture(params=[True, False])\ndef sort(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef as_index(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef dropna(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef observed(request):\n return request.param\n\n\n@pytest.fixture\ndef df():\n return DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n }\n )\n\n\n@pytest.fixture\ndef ts():\n return Series(\n np.random.default_rng(2).standard_normal(30),\n index=date_range("2000-01-01", periods=30, freq="B"),\n )\n\n\n@pytest.fixture\ndef tsframe():\n return DataFrame(\n np.random.default_rng(2).standard_normal((30, 4)),\n columns=Index(list("ABCD"), dtype=object),\n index=date_range("2000-01-01", periods=30, freq="B"),\n )\n\n\n@pytest.fixture\ndef three_group():\n return DataFrame(\n {\n "A": [\n "foo",\n "foo",\n "foo",\n "foo",\n "bar",\n "bar",\n "bar",\n "bar",\n "foo",\n "foo",\n "foo",\n ],\n "B": [\n "one",\n "one",\n "one",\n "two",\n "one",\n "one",\n "one",\n "two",\n "two",\n "two",\n "one",\n ],\n "C": [\n "dull",\n "dull",\n "shiny",\n "dull",\n "dull",\n "shiny",\n "shiny",\n "dull",\n "shiny",\n "shiny",\n "shiny",\n ],\n "D": np.random.default_rng(2).standard_normal(11),\n "E": np.random.default_rng(2).standard_normal(11),\n "F": np.random.default_rng(2).standard_normal(11),\n }\n )\n\n\n@pytest.fixture()\ndef slice_test_df():\n data = [\n [0, "a", "a0_at_0"],\n [1, "b", "b0_at_1"],\n [2, "a", "a1_at_2"],\n [3, "b", "b1_at_3"],\n [4, "c", "c0_at_4"],\n [5, "a", 
"a2_at_5"],\n [6, "a", "a3_at_6"],\n [7, "a", "a4_at_7"],\n ]\n df = DataFrame(data, columns=["Index", "Group", "Value"])\n return df.set_index("Index")\n\n\n@pytest.fixture()\ndef slice_test_grouped(slice_test_df):\n return slice_test_df.groupby("Group", as_index=False)\n\n\n@pytest.fixture(params=sorted(reduction_kernels))\ndef reduction_func(request):\n """\n yields the string names of all groupby reduction functions, one at a time.\n """\n return request.param\n\n\n@pytest.fixture(params=sorted(transformation_kernels))\ndef transformation_func(request):\n """yields the string names of all groupby transformation functions."""\n return request.param\n\n\n@pytest.fixture(params=sorted(reduction_kernels) + sorted(transformation_kernels))\ndef groupby_func(request):\n """yields both aggregation and transformation functions."""\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef parallel(request):\n """parallel keyword argument for numba.jit"""\n return request.param\n\n\n# Can parameterize nogil & nopython over True | False, but limiting per\n# https://github.com/pandas-dev/pandas/pull/41971#issuecomment-860607472\n\n\n@pytest.fixture(params=[False])\ndef nogil(request):\n """nogil keyword argument for numba.jit"""\n return request.param\n\n\n@pytest.fixture(params=[True])\ndef nopython(request):\n """nopython keyword argument for numba.jit"""\n return request.param\n\n\n@pytest.fixture(\n params=[\n ("mean", {}),\n ("var", {"ddof": 1}),\n ("var", {"ddof": 0}),\n ("std", {"ddof": 1}),\n ("std", {"ddof": 0}),\n ("sum", {}),\n ("min", {}),\n ("max", {}),\n ("sum", {"min_count": 2}),\n ("min", {"min_count": 2}),\n ("max", {"min_count": 2}),\n ],\n ids=[\n "mean",\n "var_1",\n "var_0",\n "std_1",\n "std_0",\n "sum",\n "min",\n "max",\n "sum-min_count",\n "min-min_count",\n "max-min_count",\n ],\n)\ndef numba_supported_reductions(request):\n """reductions supported with engine='numba'"""\n return request.param\n | 
.venv\Lib\site-packages\pandas\tests\groupby\conftest.py | conftest.py | Python | 4,785 | 0.95 | 0.096154 | 0.011696 | react-lib | 329 | 2025-06-03T14:30:34.983738 | BSD-3-Clause | true | ed7731fde99338925d4c8163ef99f72d |
"""\nTests that apply to all groupby operation methods.\n\nThe only tests that should appear here are those that use the `groupby_func` fixture.\nEven if it does use that fixture, prefer a more specific test file if it available\nsuch as:\n\n - test_categorical\n - test_groupby_dropna\n - test_groupby_subclass\n - test_raises\n"""\n\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\nfrom pandas.tests.groupby import get_groupby_method_args\n\n\ndef test_multiindex_group_all_columns_when_empty(groupby_func):\n # GH 32464\n df = DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"])\n gb = df.groupby(["a", "b", "c"], group_keys=False)\n method = getattr(gb, groupby_func)\n args = get_groupby_method_args(groupby_func, df)\n\n warn = FutureWarning if groupby_func == "fillna" else None\n warn_msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=warn_msg):\n result = method(*args).index\n expected = df.index\n tm.assert_index_equal(result, expected)\n\n\ndef test_duplicate_columns(request, groupby_func, as_index):\n # GH#50806\n if groupby_func == "corrwith":\n msg = "GH#50845 - corrwith fails when there are duplicate columns"\n request.applymarker(pytest.mark.xfail(reason=msg))\n df = DataFrame([[1, 3, 6], [1, 4, 7], [2, 5, 8]], columns=list("abb"))\n args = get_groupby_method_args(groupby_func, df)\n gb = df.groupby("a", as_index=as_index)\n warn = FutureWarning if groupby_func == "fillna" else None\n warn_msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=warn_msg):\n result = getattr(gb, groupby_func)(*args)\n\n expected_df = df.set_axis(["a", "b", "c"], axis=1)\n expected_args = get_groupby_method_args(groupby_func, expected_df)\n expected_gb = expected_df.groupby("a", as_index=as_index)\n warn = FutureWarning if groupby_func == "fillna" else None\n warn_msg = "DataFrameGroupBy.fillna is deprecated"\n with 
tm.assert_produces_warning(warn, match=warn_msg):\n expected = getattr(expected_gb, groupby_func)(*expected_args)\n if groupby_func not in ("size", "ngroup", "cumcount"):\n expected = expected.rename(columns={"c": "b"})\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "idx",\n [\n pd.Index(["a", "a"], name="foo"),\n pd.MultiIndex.from_tuples((("a", "a"), ("a", "a")), names=["foo", "bar"]),\n ],\n)\ndef test_dup_labels_output_shape(groupby_func, idx):\n if groupby_func in {"size", "ngroup", "cumcount"}:\n pytest.skip(f"Not applicable for {groupby_func}")\n\n df = DataFrame([[1, 1]], columns=idx)\n grp_by = df.groupby([0])\n\n args = get_groupby_method_args(groupby_func, df)\n warn = FutureWarning if groupby_func == "fillna" else None\n warn_msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=warn_msg):\n result = getattr(grp_by, groupby_func)(*args)\n\n assert result.shape == (1, 2)\n tm.assert_index_equal(result.columns, idx)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_all_methods.py | test_all_methods.py | Python | 3,077 | 0.95 | 0.156627 | 0.029412 | react-lib | 18 | 2024-07-11T15:18:11.169416 | BSD-3-Clause | true | fcd37a1bd235dd010289246e5a4e3f4a |
"""\nTests of the groupby API, including internal consistency and with other pandas objects.\n\nTests in this file should only check the existence, names, and arguments of groupby\nmethods. It should not test the results of any groupby operation.\n"""\n\nimport inspect\n\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n)\nfrom pandas.core.groupby.base import (\n groupby_other_methods,\n reduction_kernels,\n transformation_kernels,\n)\nfrom pandas.core.groupby.generic import (\n DataFrameGroupBy,\n SeriesGroupBy,\n)\n\n\ndef test_tab_completion(multiindex_dataframe_random_data):\n grp = multiindex_dataframe_random_data.groupby(level="second")\n results = {v for v in dir(grp) if not v.startswith("_")}\n expected = {\n "A",\n "B",\n "C",\n "agg",\n "aggregate",\n "apply",\n "boxplot",\n "filter",\n "first",\n "get_group",\n "groups",\n "hist",\n "indices",\n "last",\n "max",\n "mean",\n "median",\n "min",\n "ngroups",\n "nth",\n "ohlc",\n "plot",\n "prod",\n "size",\n "std",\n "sum",\n "transform",\n "var",\n "sem",\n "count",\n "nunique",\n "head",\n "describe",\n "cummax",\n "quantile",\n "rank",\n "cumprod",\n "tail",\n "resample",\n "cummin",\n "fillna",\n "cumsum",\n "cumcount",\n "ngroup",\n "all",\n "shift",\n "skew",\n "take",\n "pct_change",\n "any",\n "corr",\n "corrwith",\n "cov",\n "dtypes",\n "ndim",\n "diff",\n "idxmax",\n "idxmin",\n "ffill",\n "bfill",\n "rolling",\n "expanding",\n "pipe",\n "sample",\n "ewm",\n "value_counts",\n }\n assert results == expected\n\n\ndef test_all_methods_categorized(multiindex_dataframe_random_data):\n grp = multiindex_dataframe_random_data.groupby(\n multiindex_dataframe_random_data.iloc[:, 0]\n )\n names = {_ for _ in dir(grp) if not _.startswith("_")} - set(\n multiindex_dataframe_random_data.columns\n )\n new_names = set(names)\n new_names -= reduction_kernels\n new_names -= transformation_kernels\n new_names -= groupby_other_methods\n\n assert not reduction_kernels & transformation_kernels\n assert not 
reduction_kernels & groupby_other_methods\n assert not transformation_kernels & groupby_other_methods\n\n # new public method?\n if new_names:\n msg = f"""\nThere are uncategorized methods defined on the Grouper class:\n{new_names}.\n\nWas a new method recently added?\n\nEvery public method On Grouper must appear in exactly one the\nfollowing three lists defined in pandas.core.groupby.base:\n- `reduction_kernels`\n- `transformation_kernels`\n- `groupby_other_methods`\nsee the comments in pandas/core/groupby/base.py for guidance on\nhow to fix this test.\n """\n raise AssertionError(msg)\n\n # removed a public method?\n all_categorized = reduction_kernels | transformation_kernels | groupby_other_methods\n if names != all_categorized:\n msg = f"""\nSome methods which are supposed to be on the Grouper class\nare missing:\n{all_categorized - names}.\n\nThey're still defined in one of the lists that live in pandas/core/groupby/base.py.\nIf you removed a method, you should update them\n"""\n raise AssertionError(msg)\n\n\ndef test_frame_consistency(groupby_func):\n # GH#48028\n if groupby_func in ("first", "last"):\n msg = "first and last are entirely different between frame and groupby"\n pytest.skip(reason=msg)\n\n if groupby_func in ("cumcount", "ngroup"):\n assert not hasattr(DataFrame, groupby_func)\n return\n\n frame_method = getattr(DataFrame, groupby_func)\n gb_method = getattr(DataFrameGroupBy, groupby_func)\n result = set(inspect.signature(gb_method).parameters)\n if groupby_func == "size":\n # "size" is a method on GroupBy but property on DataFrame:\n expected = {"self"}\n else:\n expected = set(inspect.signature(frame_method).parameters)\n\n # Exclude certain arguments from result and expected depending on the operation\n # Some of these may be purposeful inconsistencies between the APIs\n exclude_expected, exclude_result = set(), set()\n if groupby_func in ("any", "all"):\n exclude_expected = {"kwargs", "bool_only", "axis"}\n elif groupby_func in 
("count",):\n exclude_expected = {"numeric_only", "axis"}\n elif groupby_func in ("nunique",):\n exclude_expected = {"axis"}\n elif groupby_func in ("max", "min"):\n exclude_expected = {"axis", "kwargs", "skipna"}\n exclude_result = {"min_count", "engine", "engine_kwargs"}\n elif groupby_func in ("mean", "std", "sum", "var"):\n exclude_expected = {"axis", "kwargs", "skipna"}\n exclude_result = {"engine", "engine_kwargs"}\n elif groupby_func in ("median", "prod", "sem"):\n exclude_expected = {"axis", "kwargs", "skipna"}\n elif groupby_func in ("backfill", "bfill", "ffill", "pad"):\n exclude_expected = {"downcast", "inplace", "axis", "limit_area"}\n elif groupby_func in ("cummax", "cummin"):\n exclude_expected = {"skipna", "args"}\n exclude_result = {"numeric_only"}\n elif groupby_func in ("cumprod", "cumsum"):\n exclude_expected = {"skipna"}\n elif groupby_func in ("pct_change",):\n exclude_expected = {"kwargs"}\n exclude_result = {"axis"}\n elif groupby_func in ("rank",):\n exclude_expected = {"numeric_only"}\n elif groupby_func in ("quantile",):\n exclude_expected = {"method", "axis"}\n\n # Ensure excluded arguments are actually in the signatures\n assert result & exclude_result == exclude_result\n assert expected & exclude_expected == exclude_expected\n\n result -= exclude_result\n expected -= exclude_expected\n assert result == expected\n\n\ndef test_series_consistency(request, groupby_func):\n # GH#48028\n if groupby_func in ("first", "last"):\n pytest.skip("first and last are entirely different between Series and groupby")\n\n if groupby_func in ("cumcount", "corrwith", "ngroup"):\n assert not hasattr(Series, groupby_func)\n return\n\n series_method = getattr(Series, groupby_func)\n gb_method = getattr(SeriesGroupBy, groupby_func)\n result = set(inspect.signature(gb_method).parameters)\n if groupby_func == "size":\n # "size" is a method on GroupBy but property on Series\n expected = {"self"}\n else:\n expected = 
set(inspect.signature(series_method).parameters)\n\n # Exclude certain arguments from result and expected depending on the operation\n # Some of these may be purposeful inconsistencies between the APIs\n exclude_expected, exclude_result = set(), set()\n if groupby_func in ("any", "all"):\n exclude_expected = {"kwargs", "bool_only", "axis"}\n elif groupby_func in ("diff",):\n exclude_result = {"axis"}\n elif groupby_func in ("max", "min"):\n exclude_expected = {"axis", "kwargs", "skipna"}\n exclude_result = {"min_count", "engine", "engine_kwargs"}\n elif groupby_func in ("mean", "std", "sum", "var"):\n exclude_expected = {"axis", "kwargs", "skipna"}\n exclude_result = {"engine", "engine_kwargs"}\n elif groupby_func in ("median", "prod", "sem"):\n exclude_expected = {"axis", "kwargs", "skipna"}\n elif groupby_func in ("backfill", "bfill", "ffill", "pad"):\n exclude_expected = {"downcast", "inplace", "axis", "limit_area"}\n elif groupby_func in ("cummax", "cummin"):\n exclude_expected = {"skipna", "args"}\n exclude_result = {"numeric_only"}\n elif groupby_func in ("cumprod", "cumsum"):\n exclude_expected = {"skipna"}\n elif groupby_func in ("pct_change",):\n exclude_expected = {"kwargs"}\n exclude_result = {"axis"}\n elif groupby_func in ("rank",):\n exclude_expected = {"numeric_only"}\n elif groupby_func in ("idxmin", "idxmax"):\n exclude_expected = {"args", "kwargs"}\n elif groupby_func in ("quantile",):\n exclude_result = {"numeric_only"}\n\n # Ensure excluded arguments are actually in the signatures\n assert result & exclude_result == exclude_result\n assert expected & exclude_expected == exclude_expected\n\n result -= exclude_result\n expected -= exclude_expected\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\groupby\test_api.py | test_api.py | Python | 8,481 | 0.95 | 0.079245 | 0.050633 | vue-tools | 501 | 2023-10-12T11:09:09.214130 | BSD-3-Clause | true | e3ddcfd11e0e17b958a5739595b8d1c3 |
# Behavioral tests for DataFrameGroupBy.apply / SeriesGroupBy.apply.
# Most tests assert the FutureWarning "DataFrameGroupBy.apply operated on the
# grouping columns" emitted by this pandas version; expectations are tied to
# that deprecation cycle.
from datetime import (
    date,
    datetime,
)

import numpy as np
import pytest

from pandas._config import using_string_dtype

import pandas as pd
from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Series,
    bdate_range,
)
import pandas._testing as tm
from pandas.tests.groupby import get_groupby_method_args


def test_apply_func_that_appends_group_to_list_without_copy():
    # GH: 17718
    # A side-effecting applied function must see the real group (including the
    # grouping column), not a defensive copy.
    df = DataFrame(1, index=list(range(10)) * 10, columns=[0]).reset_index()
    groups = []

    def store(group):
        groups.append(group)

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.groupby("index").apply(store)
    expected_value = DataFrame(
        {"index": [0] * 10, 0: [1] * 10}, index=pd.RangeIndex(0, 100, 10)
    )

    tm.assert_frame_equal(groups[0], expected_value)


def test_apply_index_date(using_infer_string):
    # GH 5788
    # apply(idxmax) over a date-grouped DatetimeIndex must agree with the
    # built-in idxmax reduction.
    ts = [
        "2011-05-16 00:00",
        "2011-05-16 01:00",
        "2011-05-16 02:00",
        "2011-05-16 03:00",
        "2011-05-17 02:00",
        "2011-05-17 03:00",
        "2011-05-17 04:00",
        "2011-05-17 05:00",
        "2011-05-18 02:00",
        "2011-05-18 03:00",
        "2011-05-18 04:00",
        "2011-05-18 05:00",
    ]
    df = DataFrame(
        {
            "value": [
                1.40893,
                1.40760,
                1.40750,
                1.40649,
                1.40893,
                1.40760,
                1.40750,
                1.40649,
                1.40893,
                1.40760,
                1.40750,
                1.40649,
            ],
        },
        index=Index(pd.to_datetime(ts), name="date_time"),
    )
    expected = df.groupby(df.index.date).idxmax()
    result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
    tm.assert_frame_equal(result, expected)


def test_apply_index_date_object():
    # GH 5789
    # don't auto coerce dates
    ts = [
        "2011-05-16 00:00",
        "2011-05-16 01:00",
        "2011-05-16 02:00",
        "2011-05-16 03:00",
        "2011-05-17 02:00",
        "2011-05-17 03:00",
        "2011-05-17 04:00",
        "2011-05-17 05:00",
        "2011-05-18 02:00",
        "2011-05-18 03:00",
        "2011-05-18 04:00",
        "2011-05-18 05:00",
    ]
    df = DataFrame([row.split() for row in ts], columns=["date", "time"])
    df["value"] = [
        1.40893,
        1.40760,
        1.40750,
        1.40649,
        1.40893,
        1.40760,
        1.40750,
        1.40649,
        1.40893,
        1.40760,
        1.40750,
        1.40649,
    ]
    exp_idx = Index(["2011-05-16", "2011-05-17", "2011-05-18"], name="date")
    expected = Series(["00:00", "02:00", "02:00"], index=exp_idx)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("date", group_keys=False).apply(
            lambda x: x["time"][x["value"].idxmax()]
        )
    tm.assert_series_equal(result, expected)


def test_apply_trivial(using_infer_string):
    # GH 20066
    # trivial apply: ignore input and return a constant dataframe.
    df = DataFrame(
        {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
        columns=["key", "data"],
    )
    dtype = "str" if using_infer_string else "object"
    expected = pd.concat([df.iloc[1:], df.iloc[1:]], axis=1, keys=["float64", dtype])

    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby([str(x) for x in df.dtypes], axis=1)
    result = gb.apply(lambda x: df.iloc[1:])

    tm.assert_frame_equal(result, expected)


def test_apply_trivial_fail(using_infer_string):
    # GH 20066
    # Same as test_apply_trivial but with group_keys=True: the constant frame
    # is returned whole per group (no row slice).
    df = DataFrame(
        {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
        columns=["key", "data"],
    )
    dtype = "str" if using_infer_string else "object"
    expected = pd.concat([df, df], axis=1, keys=["float64", dtype])
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby([str(x) for x in df.dtypes], axis=1, group_keys=True)
    result = gb.apply(lambda x: df)

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "df, group_names",
    [
        (DataFrame({"a": [1, 1, 1, 2, 3], "b": ["a", "a", "a", "b", "c"]}), [1, 2, 3]),
        (DataFrame({"a": [0, 0, 1, 1], "b": [0, 1, 0, 1]}), [0, 1]),
        (DataFrame({"a": [1]}), [1]),
        (DataFrame({"a": [1, 1, 1, 2, 2, 1, 1, 2], "b": range(8)}), [1, 2]),
        (DataFrame({"a": [1, 2, 3, 1, 2, 3], "two": [4, 5, 6, 7, 8, 9]}), [1, 2, 3]),
        (
            DataFrame(
                {
                    "a": list("aaabbbcccc"),
                    "B": [3, 4, 3, 6, 5, 2, 1, 9, 5, 4],
                    "C": [4, 0, 2, 2, 2, 7, 8, 6, 2, 8],
                }
            ),
            ["a", "b", "c"],
        ),
        (DataFrame([[1, 2, 3], [2, 2, 3]], columns=["a", "b", "c"]), [1, 2]),
    ],
    ids=[
        "GH2936",
        "GH7739 & GH10519",
        "GH10519",
        "GH2656",
        "GH12155",
        "GH20084",
        "GH21417",
    ],
)
def test_group_apply_once_per_group(df, group_names):
    # GH2936, GH7739, GH10519, GH2656, GH12155, GH20084, GH21417

    # This test should ensure that a function is only evaluated
    # once per group. Previously the function has been evaluated twice
    # on the first group to check if the Cython index slider is safe to use
    # This test ensures that the side effect (append to list) is only triggered
    # once per group

    names = []
    # cannot parameterize over the functions since they need external
    # `names` to detect side effects

    def f_copy(group):
        # this takes the fast apply path
        names.append(group.name)
        return group.copy()

    def f_nocopy(group):
        # this takes the slow apply path
        names.append(group.name)
        return group

    def f_scalar(group):
        # GH7739, GH2656
        names.append(group.name)
        return 0

    def f_none(group):
        # GH10519, GH12155, GH21417
        names.append(group.name)

    def f_constant_df(group):
        # GH2936, GH20084
        names.append(group.name)
        return DataFrame({"a": [1], "b": [1]})

    for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]:
        del names[:]

        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df.groupby("a", group_keys=False).apply(func)
        assert names == group_names


def test_group_apply_once_per_group2(capsys):
    # GH: 31111
    # groupby-apply need to execute len(set(group_by_columns)) times

    expected = 2  # Number of times `apply` should call a function for the current test

    df = DataFrame(
        {
            "group_by_column": [0, 0, 0, 0, 1, 1, 1, 1],
            "test_column": ["0", "2", "4", "6", "8", "10", "12", "14"],
        },
        index=["0", "2", "4", "6", "8", "10", "12", "14"],
    )

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.groupby("group_by_column", group_keys=False).apply(
            lambda df: print("function_called")
        )

    result = capsys.readouterr().out.count("function_called")
    # If `groupby` behaves unexpectedly, this test will break
    assert result == expected


def test_apply_fast_slow_identical():
    # GH 31613
    # Fast (copy-returning) and slow (identity-returning) apply paths must
    # produce identical results.
    df = DataFrame({"A": [0, 0, 1], "b": range(3)})

    # For simple index structures we check for fast/slow apply using
    # an identity check on in/output
    def slow(group):
        return group

    def fast(group):
        return group.copy()

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        fast_df = df.groupby("A", group_keys=False).apply(fast)
    with tm.assert_produces_warning(FutureWarning, match=msg):
        slow_df = df.groupby("A", group_keys=False).apply(slow)

    tm.assert_frame_equal(fast_df, slow_df)


@pytest.mark.parametrize(
    "func",
    [
        lambda x: x,
        lambda x: x[:],
        lambda x: x.copy(deep=False),
        lambda x: x.copy(deep=True),
    ],
)
def test_groupby_apply_identity_maybecopy_index_identical(func):
    # GH 14927
    # Whether the function returns a copy of the input data or not should not
    # have an impact on the index structure of the result since this is not
    # transparent to the user

    df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("g", group_keys=False).apply(func)
    tm.assert_frame_equal(result, df)


def test_apply_with_mixed_dtype():
    # GH3480, apply with mixed dtype on axis=1 breaks in 0.11
    df = DataFrame(
        {
            "foo1": np.random.default_rng(2).standard_normal(6),
            "foo2": ["one", "two", "two", "three", "one", "two"],
        }
    )
    result = df.apply(lambda x: x, axis=1).dtypes
    expected = df.dtypes
    tm.assert_series_equal(result, expected)

    # GH 3610 incorrect dtype conversion with as_index=False
    df = DataFrame({"c1": [1, 2, 6, 6, 8]})
    df["c2"] = df.c1 / 2.0
    result1 = df.groupby("c2").mean().reset_index().c2
    result2 = df.groupby("c2", as_index=False).mean().c2
    tm.assert_series_equal(result1, result2)


def test_groupby_as_index_apply():
    # GH #4648 and #3417
    df = DataFrame(
        {
            "item_id": ["b", "b", "a", "c", "a", "b"],
            "user_id": [1, 2, 1, 1, 3, 1],
            "time": range(6),
        }
    )

    g_as = df.groupby("user_id", as_index=True)
    g_not_as = df.groupby("user_id", as_index=False)

    res_as = g_as.head(2).index
    res_not_as = g_not_as.head(2).index
    exp = Index([0, 1, 2, 4])
    tm.assert_index_equal(res_as, exp)
    tm.assert_index_equal(res_not_as, exp)

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res_as_apply = g_as.apply(lambda x: x.head(2)).index
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index

    # apply doesn't maintain the original ordering
    # changed in GH5610 as the as_index=False returns a MI here
    exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)])
    tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
    exp_as_apply = MultiIndex.from_tuples(tp, names=["user_id", None])

    tm.assert_index_equal(res_as_apply, exp_as_apply)
    tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)

    ind = Index(list("abcde"))
    df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index
    tm.assert_index_equal(res, ind)


def test_apply_concat_preserve_names(three_group):
    # Index names set by the applied function must survive concatenation of
    # per-group results; mismatched names collapse to None.
    grouped = three_group.groupby(["A", "B"])

    def desc(group):
        result = group.describe()
        result.index.name = "stat"
        return result

    def desc2(group):
        result = group.describe()
        result.index.name = "stat"
        result = result[: len(group)]
        # weirdo
        return result

    def desc3(group):
        result = group.describe()

        # names are different
        result.index.name = f"stat_{len(group):d}"

        result = result[: len(group)]
        # weirdo
        return result

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.apply(desc)
    assert result.index.names == ("A", "B", "stat")

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result2 = grouped.apply(desc2)
    assert result2.index.names == ("A", "B", "stat")

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result3 = grouped.apply(desc3)
    assert result3.index.names == ("A", "B", None)


def test_apply_series_to_frame():
    # A SeriesGroupBy.apply whose function returns a DataFrame should produce
    # a DataFrame result preserving the original index.
    def f(piece):
        with np.errstate(invalid="ignore"):
            logged = np.log(piece)
        return DataFrame(
            {"value": piece, "demeaned": piece - piece.mean(), "logged": logged}
        )

    dr = bdate_range("1/1/2000", periods=100)
    ts = Series(np.random.default_rng(2).standard_normal(100), index=dr)

    grouped = ts.groupby(lambda x: x.month, group_keys=False)
    result = grouped.apply(f)

    assert isinstance(result, DataFrame)
    assert not hasattr(result, "name")  # GH49907
    tm.assert_index_equal(result.index, ts.index)


def test_apply_series_yield_constant(df):
    # Scalar-returning apply on a selected column keeps the group key names.
    result = df.groupby(["A", "B"])["C"].apply(len)
    assert result.index.names[:2] == ("A", "B")


def test_apply_frame_yield_constant(df):
    # GH13568
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(["A", "B"]).apply(len)
    assert isinstance(result, Series)
    assert result.name is None

    result = df.groupby(["A", "B"])[["C", "D"]].apply(len)
    assert isinstance(result, Series)
    assert result.name is None


def test_apply_frame_to_series(df):
    # apply(len) must produce the same index/values as the count reduction.
    grouped = df.groupby(["A", "B"])
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.apply(len)
    expected = grouped.count()["C"]
    tm.assert_index_equal(result.index, expected.index)
    tm.assert_numpy_array_equal(result.values, expected.values)


def test_apply_frame_not_as_index_column_name(df):
    # GH 35964 - path within _wrap_applied_output not hit by a test
    grouped = df.groupby(["A", "B"], as_index=False)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.apply(len)
    expected = grouped.count().rename(columns={"C": np.nan}).drop(columns="D")
    # TODO(GH#34306): Use assert_frame_equal when column name is not np.nan
    tm.assert_index_equal(result.index, expected.index)
    tm.assert_numpy_array_equal(result.values, expected.values)


def test_apply_frame_concat_series():
    # Applying a Series-returning nested-groupby function should match the
    # equivalent column-wise apply.
    def trans(group):
        return group.groupby("B")["C"].sum().sort_values().iloc[:2]

    def trans2(group):
        grouped = group.groupby(df.reindex(group.index)["B"])
        return grouped.sum().sort_values().iloc[:2]

    df = DataFrame(
        {
            "A": np.random.default_rng(2).integers(0, 5, 1000),
            "B": np.random.default_rng(2).integers(0, 5, 1000),
            "C": np.random.default_rng(2).standard_normal(1000),
        }
    )

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("A").apply(trans)
    exp = df.groupby("A")["C"].apply(trans2)
    tm.assert_series_equal(result, exp, check_names=False)
    assert result.name == "C"


def test_apply_transform(ts):
    # An elementwise apply is equivalent to the corresponding transform.
    grouped = ts.groupby(lambda x: x.month, group_keys=False)
    result = grouped.apply(lambda x: x * 2)
    expected = grouped.transform(lambda x: x * 2)
    tm.assert_series_equal(result, expected)


def test_apply_multikey_corner(tsframe):
    # Multi-key grouping: each group's slice of the apply result must equal
    # applying the function to that group directly.
    grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])

    def f(group):
        return group.sort_values("A")[-5:]

    result = grouped.apply(f)
    for key, group in grouped:
        tm.assert_frame_equal(result.loc[key], f(group))


@pytest.mark.parametrize("group_keys", [True, False])
def test_apply_chunk_view(group_keys):
    # Low level tinkering could be unsafe, make sure not
    df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2])
    expected = df.take([0, 1, 3, 4, 6, 7])
    if group_keys:
        expected.index = MultiIndex.from_arrays(
            [[1, 1, 2, 2, 3, 3], expected.index], names=["key", None]
        )

    tm.assert_frame_equal(result, expected)


def test_apply_no_name_column_conflict():
    # Grouping columns named "name"/"name2" must not clash with the group
    # `.name` attribute machinery inside apply.
    df = DataFrame(
        {
            "name": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
            "name2": [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
            "value": range(9, -1, -1),
        }
    )

    # it works! #2605
    grouped = df.groupby(["name", "name2"])
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        grouped.apply(lambda x: x.sort_values("value", inplace=True))


def test_apply_typecast_fail():
    # Adding a derived column inside apply must not coerce existing dtypes.
    df = DataFrame(
        {
            "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
            "c": np.tile(["a", "b", "c"], 2),
            "v": np.arange(1.0, 7.0),
        }
    )

    def f(group):
        v = group["v"]
        group["v2"] = (v - v.min()) / (v.max() - v.min())
        return group

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("d", group_keys=False).apply(f)

    expected = df.copy()
    expected["v2"] = np.tile([0.0, 0.5, 1], 2)

    tm.assert_frame_equal(result, expected)


def test_apply_multiindex_fail():
    # Same as test_apply_typecast_fail but the frame carries a MultiIndex.
    index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
    df = DataFrame(
        {
            "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
            "c": np.tile(["a", "b", "c"], 2),
            "v": np.arange(1.0, 7.0),
        },
        index=index,
    )

    def f(group):
        v = group["v"]
        group["v2"] = (v - v.min()) / (v.max() - v.min())
        return group

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("d", group_keys=False).apply(f)

    expected = df.copy()
    expected["v2"] = np.tile([0.0, 0.5, 1], 2)

    tm.assert_frame_equal(result, expected)


def test_apply_corner(tsframe):
    # Elementwise apply over a year-grouped frame equals the vectorized op.
    result = tsframe.groupby(lambda x: x.year, group_keys=False).apply(lambda x: x * 2)
    expected = tsframe * 2
    tm.assert_frame_equal(result, expected)


def test_apply_without_copy():
    # GH 5545
    # returning a non-copy in an applied function fails

    data = DataFrame(
        {
            "id_field": [100, 100, 200, 300],
            "category": ["a", "b", "c", "c"],
            "value": [1, 2, 3, 4],
        }
    )

    def filt1(x):
        if x.shape[0] == 1:
            return x.copy()
        else:
            return x[x.category == "c"]

    def filt2(x):
        if x.shape[0] == 1:
            return x
        else:
            return x[x.category == "c"]

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = data.groupby("id_field").apply(filt1)
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = data.groupby("id_field").apply(filt2)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("test_series", [True, False])
def test_apply_with_duplicated_non_sorted_axis(test_series):
    # GH 30667
    df = DataFrame(
        [["x", "p"], ["x", "p"], ["x", "o"]], columns=["X", "Y"], index=[1, 2, 2]
    )
    if test_series:
        ser = df.set_index("Y")["X"]
        result = ser.groupby(level=0, group_keys=False).apply(lambda x: x)

        # not expecting the order to remain the same for duplicated axis
        result = result.sort_index()
        expected = ser.sort_index()
        tm.assert_series_equal(result, expected)
    else:
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = df.groupby("Y", group_keys=False).apply(lambda x: x)

        # not expecting the order to remain the same for duplicated axis
        result = result.sort_values("Y")
        expected = df.sort_values("Y")
        tm.assert_frame_equal(result, expected)


def test_apply_reindex_values():
    # GH: 26209
    # reindexing from a single column of a groupby object with duplicate indices caused
    # a ValueError (cannot reindex from duplicate axis) in 0.24.2, the problem was
    # solved in #30679
    values = [1, 2, 3, 4]
    indices = [1, 1, 2, 2]
    df = DataFrame({"group": ["Group1", "Group2"] * 2, "value": values}, index=indices)
    expected = Series(values, index=indices, name="value")

    def reindex_helper(x):
        return x.reindex(np.arange(x.index.min(), x.index.max() + 1))

    # the following group by raised a ValueError
    result = df.groupby("group", group_keys=False).value.apply(reindex_helper)
    tm.assert_series_equal(expected, result)


def test_apply_corner_cases():
    # #535, can't use sliding iterator

    N = 1000
    labels = np.random.default_rng(2).integers(0, 100, size=N)
    df = DataFrame(
        {
            "key": labels,
            "value1": np.random.default_rng(2).standard_normal(N),
            "value2": ["foo", "bar", "baz", "qux"] * (N // 4),
        }
    )

    grouped = df.groupby("key", group_keys=False)

    def f(g):
        g["value3"] = g["value1"] * 2
        return g

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.apply(f)
    assert "value3" in result


def test_apply_numeric_coercion_when_datetime():
    # In the past, group-by/apply operations have been over-eager
    # in converting dtypes to numeric, in the presence of datetime
    # columns. Various GH issues were filed, the reproductions
    # for which are here.

    # GH 15670
    df = DataFrame(
        {"Number": [1, 2], "Date": ["2017-03-02"] * 2, "Str": ["foo", "inf"]}
    )
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
    df.Date = pd.to_datetime(df.Date)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
    tm.assert_series_equal(result["Str"], expected["Str"])

    # GH 15421
    df = DataFrame(
        {"A": [10, 20, 30], "B": ["foo", "3", "4"], "T": [pd.Timestamp("12:31:22")] * 3}
    )

    def get_B(g):
        return g.iloc[0][["B"]]

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("A").apply(get_B)["B"]
    expected = df.B
    expected.index = df.A
    tm.assert_series_equal(result, expected)

    # GH 14423
    def predictions(tool):
        out = Series(index=["p1", "p2", "useTime"], dtype=object)
        if "step1" in list(tool.State):
            out["p1"] = str(tool[tool.State == "step1"].Machine.values[0])
        if "step2" in list(tool.State):
            out["p2"] = str(tool[tool.State == "step2"].Machine.values[0])
            out["useTime"] = str(tool[tool.State == "step2"].oTime.values[0])
        return out

    df1 = DataFrame(
        {
            "Key": ["B", "B", "A", "A"],
            "State": ["step1", "step2", "step1", "step2"],
            "oTime": ["", "2016-09-19 05:24:33", "", "2016-09-19 23:59:04"],
            "Machine": ["23", "36L", "36R", "36R"],
        }
    )
    df2 = df1.copy()
    df2.oTime = pd.to_datetime(df2.oTime)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = df1.groupby("Key").apply(predictions).p1
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df2.groupby("Key").apply(predictions).p1
    tm.assert_series_equal(expected, result)


def test_apply_aggregating_timedelta_and_datetime():
    # Regression test for GH 15562
    # The following groupby caused ValueErrors and IndexErrors pre 0.20.0

    df = DataFrame(
        {
            "clientid": ["A", "B", "C"],
            "datetime": [np.datetime64("2017-02-01 00:00:00")] * 3,
        }
    )
    df["time_delta_zero"] = df.datetime - df.datetime
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("clientid").apply(
            lambda ddf: Series(
                {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()}
            )
        )
    expected = DataFrame(
        {
            "clientid": ["A", "B", "C"],
            "clientid_age": [np.timedelta64(0, "D")] * 3,
            "date": [np.datetime64("2017-02-01 00:00:00")] * 3,
        }
    ).set_index("clientid")

    tm.assert_frame_equal(result, expected)


def test_apply_groupby_datetimeindex():
    # GH 26182
    # groupby apply failed on dataframe with DatetimeIndex

    data = [["A", 10], ["B", 20], ["B", 30], ["C", 40], ["C", 50]]
    df = DataFrame(
        data, columns=["Name", "Value"], index=pd.date_range("2020-09-01", "2020-09-05")
    )

    result = df.groupby("Name").sum()

    expected = DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]})
    expected.set_index("Name", inplace=True)

    tm.assert_frame_equal(result, expected)


def test_time_field_bug():
    # Test a fix for the following error related to GH issue 11324 When
    # non-key fields in a group-by dataframe contained time-based fields
    # that were not returned by the apply function, an exception would be
    # raised.

    df = DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]})

    def func_with_no_date(batch):
        return Series({"c": 2})

    def func_with_date(batch):
        return Series({"b": datetime(2015, 1, 1), "c": 2})

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date)
    dfg_no_conversion_expected = DataFrame({"c": 2}, index=[1])
    dfg_no_conversion_expected.index.name = "a"

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        dfg_conversion = df.groupby(by=["a"]).apply(func_with_date)
    dfg_conversion_expected = DataFrame(
        {"b": pd.Timestamp(2015, 1, 1).as_unit("ns"), "c": 2}, index=[1]
    )
    dfg_conversion_expected.index.name = "a"

    tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
    tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)


def test_gb_apply_list_of_unequal_len_arrays():
    # GH1738
    df = DataFrame(
        {
            "group1": ["a", "a", "a", "b", "b", "b", "a", "a", "a", "b", "b", "b"],
            "group2": ["c", "c", "d", "d", "d", "e", "c", "c", "d", "d", "d", "e"],
            "weight": [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
            "value": [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3],
        }
    )
    df = df.set_index(["group1", "group2"])
    df_grouped = df.groupby(level=["group1", "group2"], sort=True)

    def noddy(value, weight):
        out = np.array(value * weight).repeat(3)
        return out

    # the kernel function returns arrays of unequal length
    # pandas sniffs the first one, sees it's an array and not
    # a list, and assumed the rest are of equal length
    # and so tries a vstack

    # don't die
    df_grouped.apply(lambda x: noddy(x.value, x.weight))


def test_groupby_apply_all_none():
    # Tests to make sure no errors if apply function returns all None
    # values. Issue 9684.
    test_df = DataFrame({"groups": [0, 0, 1, 1], "random_vars": [8, 7, 4, 5]})

    def test_func(x):
        pass

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = test_df.groupby("groups").apply(test_func)
    expected = DataFrame()
    tm.assert_frame_equal(result, expected)


def test_groupby_apply_none_first():
    # GH 12824. Tests if apply returns None first.
    test_df1 = DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]})
    test_df2 = DataFrame({"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]})

    def test_func(x):
        if x.shape[0] < 2:
            return None
        return x.iloc[[0, -1]]

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result1 = test_df1.groupby("groups").apply(test_func)
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result2 = test_df2.groupby("groups").apply(test_func)
    index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None])
    index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None])
    expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1)
    expected2 = DataFrame({"groups": [2, 2], "vars": [1, 3]}, index=index2)
    tm.assert_frame_equal(result1, expected1)
    tm.assert_frame_equal(result2, expected2)


def test_groupby_apply_return_empty_chunk():
    # GH 22221: apply filter which returns some empty groups
    df = DataFrame({"value": [0, 1], "group": ["filled", "empty"]})
    groups = df.groupby("group")
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = groups.apply(lambda group: group[group.value != 1]["value"])
    expected = Series(
        [0],
        name="value",
        index=MultiIndex.from_product(
            [["empty", "filled"], [0]], names=["group", None]
        ).drop("empty"),
    )
    tm.assert_series_equal(result, expected)


def test_apply_with_mixed_types():
    # gh-20949
    df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]})
    g = df.groupby("A", group_keys=False)

    result = g.transform(lambda x: x / x.sum())
    expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]})
    tm.assert_frame_equal(result, expected)

    result = g.apply(lambda x: x / x.sum())
    tm.assert_frame_equal(result, expected)


def test_func_returns_object():
    # GH 28652
    df = DataFrame({"a": [1, 2]}, index=Index([1, 2]))
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("a").apply(lambda g: g.index)
    expected = Series([Index([1]), Index([2])], index=Index([1, 2], name="a"))

    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "group_column_dtlike",
    [datetime.today(), datetime.today().date(), datetime.today().time()],
)
def test_apply_datetime_issue(group_column_dtlike):
    # GH-28247
    # groupby-apply throws an error if one of the columns in the DataFrame
    # is a datetime object and the column labels are different from
    # standard int values in range(len(num_columns))

    df = DataFrame({"a": ["foo"], "b": [group_column_dtlike]})
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42]))

    expected = DataFrame(["spam"], Index(["foo"], dtype="str", name="a"), columns=[42])
    tm.assert_frame_equal(result, expected)


def test_apply_series_return_dataframe_groups():
    # GH 10078
    tdf = DataFrame(
        {
            "day": {
                0: pd.Timestamp("2015-02-24 00:00:00"),
                1: pd.Timestamp("2015-02-24 00:00:00"),
                2: pd.Timestamp("2015-02-24 00:00:00"),
                3: pd.Timestamp("2015-02-24 00:00:00"),
                4: pd.Timestamp("2015-02-24 00:00:00"),
            },
            "userAgent": {
                0: "some UA string",
                1: "some UA string",
                2: "some UA string",
                3: "another UA string",
                4: "some UA string",
            },
            "userId": {
                0: "17661101",
                1: "17661101",
                2: "17661101",
                3: "17661101",
                4: "17661101",
            },
        }
    )

    def most_common_values(df):
        return Series({c: s.value_counts().index[0] for c, s in df.items()})

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = tdf.groupby("day").apply(most_common_values)["userId"]
    expected = Series(
        ["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId"
    )
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("category", [False, True])
def test_apply_multi_level_name(category):
    # https://github.com/pandas-dev/pandas/issues/31068
    b = [1, 2] * 5
    if category:
        b = pd.Categorical(b, categories=[1, 2, 3])
        expected_index = pd.CategoricalIndex([1, 2, 3], categories=[1, 2, 3], name="B")
        # with observed=False the unused category 3 appears with a zero sum
        expected_values = [20, 25, 0]
    else:
        expected_index = Index([1, 2], name="B")
        expected_values = [20, 25]
    expected = DataFrame(
        {"C": expected_values, "D": expected_values}, index=expected_index
    )

    df = DataFrame(
        {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))}
    ).set_index(["A", "B"])
    result = df.groupby("B", observed=False).apply(lambda x: x.sum())
    tm.assert_frame_equal(result, expected)
    assert df.index.names == ["A", "B"]


def test_groupby_apply_datetime_result_dtypes(using_infer_string):
    # GH 14849
    data = DataFrame.from_records(
        [
            (pd.Timestamp(2016, 1, 1), 
"red", "dark", 1, "8"),\n (pd.Timestamp(2015, 1, 1), "green", "stormy", 2, "9"),\n (pd.Timestamp(2014, 1, 1), "blue", "bright", 3, "10"),\n (pd.Timestamp(2013, 1, 1), "blue", "calm", 4, "potato"),\n ],\n columns=["observation", "color", "mood", "intensity", "score"],\n )\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes\n dtype = pd.StringDtype(na_value=np.nan) if using_infer_string else object\n expected = Series(\n [np.dtype("datetime64[ns]"), dtype, dtype, np.int64, dtype],\n index=["observation", "color", "mood", "intensity", "score"],\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "index",\n [\n pd.CategoricalIndex(list("abc")),\n pd.interval_range(0, 3),\n pd.period_range("2020", periods=3, freq="D"),\n MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),\n ],\n)\ndef test_apply_index_has_complex_internals(index):\n # GH 31248\n df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("group", group_keys=False).apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n\n@pytest.mark.parametrize(\n "function, expected_values",\n [\n (lambda x: x.index.to_list(), [[0, 1], [2, 3]]),\n (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]),\n (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]),\n (\n lambda x: dict(enumerate(x.index.to_list())),\n [{0: 0, 1: 1}, {0: 2, 1: 3}],\n ),\n (\n lambda x: [{n: i} for (n, i) in enumerate(x.index.to_list())],\n [[{0: 0}, {1: 1}], [{0: 2}, {1: 3}]],\n ),\n ],\n)\ndef test_apply_function_returns_non_pandas_non_scalar(function, expected_values):\n # GH 31441\n df = DataFrame(["A", "A", "B", "B"], columns=["groups"])\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with 
tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("groups").apply(function)\n expected = Series(expected_values, index=Index(["A", "B"], name="groups"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_function_returns_numpy_array():\n # GH 31605\n def fct(group):\n return group["B"].values.flatten()\n\n df = DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]})\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("A").apply(fct)\n expected = Series(\n [[1.0, 2.0], [3.0], [np.nan]], index=Index(["a", "b", "none"], name="A")\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1])\ndef test_apply_function_index_return(function):\n # GH: 22541\n df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"])\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("id").apply(function)\n expected = Series(\n [Index([0, 4, 7, 9]), Index([1, 2, 3, 5]), Index([6, 8])],\n index=Index([1, 2, 3], name="id"),\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_function_with_indexing_return_column():\n # GH#7002, GH#41480, GH#49256\n df = DataFrame(\n {\n "foo1": ["one", "two", "two", "three", "one", "two"],\n "foo2": [1, 2, 4, 4, 5, 6],\n }\n )\n result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean())\n expected = DataFrame(\n {\n "foo1": ["one", "three", "two"],\n "foo2": [3.0, 4.0, 4.0],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "udf",\n [(lambda x: x.copy()), (lambda x: x.copy().rename(lambda y: y + 1))],\n)\n@pytest.mark.parametrize("group_keys", [True, False])\ndef test_apply_result_type(group_keys, udf):\n # https://github.com/pandas-dev/pandas/issues/34809\n # We'd like to 
control whether the group keys end up in the index\n # regardless of whether the UDF happens to be a transform.\n df = DataFrame({"A": ["a", "b"], "B": [1, 2]})\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df_result = df.groupby("A", group_keys=group_keys).apply(udf)\n series_result = df.B.groupby(df.A, group_keys=group_keys).apply(udf)\n\n if group_keys:\n assert df_result.index.nlevels == 2\n assert series_result.index.nlevels == 2\n else:\n assert df_result.index.nlevels == 1\n assert series_result.index.nlevels == 1\n\n\ndef test_result_order_group_keys_false():\n # GH 34998\n # apply result order should not depend on whether index is the same or just equal\n df = DataFrame({"A": [2, 1, 2], "B": [1, 2, 3]})\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("A", group_keys=False).apply(lambda x: x)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy())\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_timezones_aware():\n # GH: 27212\n dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2\n index_no_tz = pd.DatetimeIndex(dates)\n index_tz = pd.DatetimeIndex(dates, tz="UTC")\n df1 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz})\n df2 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz})\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result1 = df1.groupby("x", group_keys=False).apply(\n lambda df: df[["x", "y"]].copy()\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result2 = df2.groupby("x", group_keys=False).apply(\n lambda df: df[["x", "y"]].copy()\n )\n\n tm.assert_frame_equal(result1, result2)\n\n\ndef 
test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func):\n # GH #34656\n # GH #34271\n df = DataFrame(\n {\n "a": [99, 99, 99, 88, 88, 88],\n "b": [1, 2, 3, 4, 5, 6],\n "c": [10, 20, 30, 40, 50, 60],\n }\n )\n\n expected = DataFrame(\n {"b": [15, 6], "c": [150, 60]},\n index=Index([88, 99], name="a"),\n )\n\n # Check output when no other methods are called before .apply()\n grp = df.groupby(by="a")\n msg = "The behavior of DataFrame.sum with axis=None is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):\n result = grp.apply(sum, include_groups=False)\n tm.assert_frame_equal(result, expected)\n\n # Check output when another method is called before .apply()\n grp = df.groupby(by="a")\n args = get_groupby_method_args(reduction_func, df)\n _ = getattr(grp, reduction_func)(*args)\n with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):\n result = grp.apply(sum, include_groups=False)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():\n # GH 29617\n\n df = DataFrame(\n {\n "A": ["a", "a", "a", "b"],\n "B": [\n date(2020, 1, 10),\n date(2020, 1, 10),\n date(2020, 2, 10),\n date(2020, 2, 10),\n ],\n "C": [1, 2, 3, 4],\n },\n index=Index([100, 101, 102, 103], name="idx"),\n )\n\n grp = df.groupby(["A", "B"])\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = grp.apply(lambda x: x.head(1))\n\n expected = df.iloc[[0, 2, 3]]\n expected = expected.reset_index()\n expected.index = MultiIndex.from_frame(expected[["A", "B", "idx"]])\n expected = expected.drop(columns="idx")\n\n tm.assert_frame_equal(result, expected)\n for val in result.index.levels[1]:\n assert type(val) is date\n\n\ndef test_apply_by_cols_equals_apply_by_rows_transposed():\n # GH 16646\n # Operating on the columns, or transposing and operating on the rows\n # should 
give the same result. There was previously a bug where the\n # by_rows operation would work fine, but by_cols would throw a ValueError\n\n df = DataFrame(\n np.random.default_rng(2).random([6, 4]),\n columns=MultiIndex.from_product([["A", "B"], [1, 2]]),\n )\n\n msg = "The 'axis' keyword in DataFrame.groupby is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.T.groupby(axis=0, level=0)\n by_rows = gb.apply(lambda x: x.droplevel(axis=0, level=0))\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb2 = df.groupby(axis=1, level=0)\n by_cols = gb2.apply(lambda x: x.droplevel(axis=1, level=0))\n\n tm.assert_frame_equal(by_cols, by_rows.T)\n tm.assert_frame_equal(by_cols, df)\n\n\n@pytest.mark.parametrize("dropna", [True, False])\ndef test_apply_dropna_with_indexed_same(dropna):\n # GH 38227\n # GH#43205\n df = DataFrame(\n {\n "col": [1, 2, 3, 4, 5],\n "group": ["a", np.nan, np.nan, "b", "b"],\n },\n index=list("xxyxz"),\n )\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x)\n expected = df.dropna() if dropna else df.iloc[[0, 3, 1, 2, 4]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "as_index, expected",\n [\n pytest.param(\n False,\n DataFrame(\n [[1, 1, 1], [2, 2, 1]], columns=Index(["a", "b", None], dtype=object)\n ),\n marks=pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)"),\n ),\n [\n True,\n Series(\n [1, 1], index=MultiIndex.from_tuples([(1, 1), (2, 2)], names=["a", "b"])\n ),\n ],\n ],\n)\ndef test_apply_as_index_constant_lambda(as_index, expected):\n # GH 13217\n df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]})\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, 
match=msg):\n result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1)\n tm.assert_equal(result, expected)\n\n\ndef test_sort_index_groups():\n # GH 20420\n df = DataFrame(\n {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]},\n index=range(5),\n )\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("C").apply(lambda x: x.A.sort_index())\n expected = Series(\n range(1, 6),\n index=MultiIndex.from_tuples(\n [(1, 0), (1, 1), (1, 2), (2, 3), (2, 4)], names=["C", None]\n ),\n name="A",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_positional_slice_groups_datetimelike():\n # GH 21651\n expected = DataFrame(\n {\n "date": pd.date_range("2010-01-01", freq="12h", periods=5),\n "vals": range(5),\n "let": list("abcde"),\n }\n )\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = expected.groupby(\n [expected.let, expected.date.dt.date], group_keys=False\n ).apply(lambda x: x.iloc[0:])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_apply_shape_cache_safety():\n # GH#42702 this fails if we cache_readonly Block.shape\n df = DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3], "C": [4, 6, 5]})\n gb = df.groupby("A")\n result = gb[["B", "C"]].apply(lambda x: x.astype(float).max() - x.min())\n\n expected = DataFrame(\n {"B": [1.0, 0.0], "C": [2.0, 0.0]}, index=Index(["a", "b"], name="A")\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_apply_to_series_name():\n # GH52444\n df = DataFrame.from_dict(\n {\n "a": ["a", "b", "a", "b"],\n "b1": ["aa", "ac", "ac", "ad"],\n "b2": ["aa", "aa", "aa", "ac"],\n }\n )\n grp = df.groupby("a")[["b1", "b2"]]\n result = grp.apply(lambda x: x.unstack().value_counts())\n\n expected_idx = MultiIndex.from_arrays(\n arrays=[["a", "a", "b", "b", "b"], ["aa", "ac", "ac", "ad", "aa"]],\n names=["a", 
None],\n )\n expected = Series([3, 1, 2, 1, 1], index=expected_idx, name="count")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("dropna", [True, False])\ndef test_apply_na(dropna):\n # GH#28984\n df = DataFrame(\n {"grp": [1, 1, 2, 2], "y": [1, 0, 2, 5], "z": [1, 2, np.nan, np.nan]}\n )\n dfgrp = df.groupby("grp", dropna=dropna)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z"))\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_empty_string_nan_coerce_bug():\n # GH#24903\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = (\n DataFrame(\n {\n "a": [1, 1, 2, 2],\n "b": ["", "", "", ""],\n "c": pd.to_datetime([1, 2, 3, 4], unit="s"),\n }\n )\n .groupby(["a", "b"])\n .apply(lambda df: df.iloc[-1])\n )\n expected = DataFrame(\n [[1, "", pd.to_datetime(2, unit="s")], [2, "", pd.to_datetime(4, unit="s")]],\n columns=["a", "b", "c"],\n index=MultiIndex.from_tuples([(1, ""), (2, "")], names=["a", "b"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("index_values", [[1, 2, 3], [1.0, 2.0, 3.0]])\ndef test_apply_index_key_error_bug(index_values):\n # GH 44310\n result = DataFrame(\n {\n "a": ["aa", "a2", "a3"],\n "b": [1, 2, 3],\n },\n index=Index(index_values),\n )\n expected = DataFrame(\n {\n "b_mean": [2.0, 3.0, 1.0],\n },\n index=Index(["a2", "a3", "aa"], name="a"),\n )\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = result.groupby("a").apply(\n lambda df: Series([df["b"].mean()], index=["b_mean"])\n )\n tm.assert_frame_equal(result, 
expected)\n\n\n@pytest.mark.parametrize(\n "arg,idx",\n [\n [\n [\n 1,\n 2,\n 3,\n ],\n [\n 0.1,\n 0.3,\n 0.2,\n ],\n ],\n [\n [\n 1,\n 2,\n 3,\n ],\n [\n 0.1,\n 0.2,\n 0.3,\n ],\n ],\n [\n [\n 1,\n 4,\n 3,\n ],\n [\n 0.1,\n 0.4,\n 0.2,\n ],\n ],\n ],\n)\ndef test_apply_nonmonotonic_float_index(arg, idx):\n # GH 34455\n expected = DataFrame({"col": arg}, index=idx)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = expected.groupby("col", group_keys=False).apply(lambda x: x)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("args, kwargs", [([True], {}), ([], {"numeric_only": True})])\ndef test_apply_str_with_args(df, args, kwargs):\n # GH#46479\n gb = df.groupby("A")\n result = gb.apply("sum", *args, **kwargs)\n expected = gb.sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("name", ["some_name", None])\ndef test_result_name_when_one_group(name):\n # GH 46369\n ser = Series([1, 2], name=name)\n result = ser.groupby(["a", "a"], group_keys=False).apply(lambda x: x)\n expected = Series([1, 2], name=name)\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, op",\n [\n ("apply", lambda gb: gb.values[-1]),\n ("apply", lambda gb: gb["b"].iloc[0]),\n ("agg", "skew"),\n ("agg", "prod"),\n ("agg", "sum"),\n ],\n)\ndef test_empty_df(method, op):\n # GH 47985\n empty_df = DataFrame({"a": [], "b": []})\n gb = empty_df.groupby("a", group_keys=True)\n group = getattr(gb, "b")\n\n result = getattr(group, method)(op)\n expected = Series(\n [], name="b", dtype="float64", index=Index([], dtype="float64", name="a")\n )\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("include_groups", [True, False])\ndef test_include_groups(include_groups):\n # GH#7155\n df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})\n gb = df.groupby("a")\n warn = FutureWarning if include_groups else 
None\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(warn, match=msg):\n result = gb.apply(lambda x: x.sum(), include_groups=include_groups)\n expected = DataFrame({"a": [2, 2], "b": [7, 5]}, index=Index([1, 2], name="a"))\n if not include_groups:\n expected = expected[["b"]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("f", [max, min, sum])\n@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key\ndef test_builtins_apply(keys, f):\n # see gh-8155\n rs = np.random.default_rng(2)\n df = DataFrame(rs.integers(1, 7, (10, 2)), columns=["jim", "joe"])\n df["jolie"] = rs.standard_normal(10)\n\n gb = df.groupby(keys)\n\n fname = f.__name__\n\n warn = None if f is not sum else FutureWarning\n msg = "The behavior of DataFrame.sum with axis=None is deprecated"\n with tm.assert_produces_warning(\n warn, match=msg, check_stacklevel=False, raise_on_extra_warnings=False\n ):\n # Also warns on deprecation GH#53425\n result = gb.apply(f)\n ngroups = len(df.drop_duplicates(subset=keys))\n\n assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"\n assert result.shape == (ngroups, 3), assert_msg\n\n npfunc = lambda x: getattr(np, fname)(x, axis=0) # numpy's equivalent function\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = gb.apply(npfunc)\n tm.assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected2 = gb.apply(lambda x: npfunc(x))\n tm.assert_frame_equal(result, expected2)\n\n if f != sum:\n expected = gb.agg(fname).reset_index()\n expected.set_index(keys, inplace=True, drop=False)\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n tm.assert_series_equal(getattr(result, fname)(axis=0), getattr(df, fname)(axis=0))\n | .venv\Lib\site-packages\pandas\tests\groupby\test_apply.py | test_apply.py | 
Python | 54,516 | 0.75 | 0.094704 | 0.094127 | react-lib | 655 | 2024-04-08T09:58:23.862940 | Apache-2.0 | true | 62fa6acb7445d9fe069d6a7ff35bfa43 |
import numpy as np\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef test_group_by_copy():\n # GH#44803\n df = pd.DataFrame(\n {\n "name": ["Alice", "Bob", "Carl"],\n "age": [20, 21, 20],\n }\n ).set_index("name")\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grp_by_same_value = df.groupby(["age"], group_keys=False).apply(\n lambda group: group\n )\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grp_by_copy = df.groupby(["age"], group_keys=False).apply(\n lambda group: group.copy()\n )\n tm.assert_frame_equal(grp_by_same_value, grp_by_copy)\n\n\ndef test_mutate_groups():\n # GH3380\n\n df = pd.DataFrame(\n {\n "cat1": ["a"] * 8 + ["b"] * 6,\n "cat2": ["c"] * 2\n + ["d"] * 2\n + ["e"] * 2\n + ["f"] * 2\n + ["c"] * 2\n + ["d"] * 2\n + ["e"] * 2,\n "cat3": [f"g{x}" for x in range(1, 15)],\n "val": np.random.default_rng(2).integers(100, size=14),\n }\n )\n\n def f_copy(x):\n x = x.copy()\n x["rank"] = x.val.rank(method="min")\n return x.groupby("cat2")["rank"].min()\n\n def f_no_copy(x):\n x["rank"] = x.val.rank(method="min")\n return x.groupby("cat2")["rank"].min()\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grpby_copy = df.groupby("cat1").apply(f_copy)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grpby_no_copy = df.groupby("cat1").apply(f_no_copy)\n tm.assert_series_equal(grpby_copy, grpby_no_copy)\n\n\ndef test_no_mutate_but_looks_like():\n # GH 8467\n # first show's mutation indicator\n # second does not, but should yield the same results\n df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result1 = df.groupby("key", 
group_keys=True).apply(lambda x: x[:].key)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key)\n tm.assert_series_equal(result1, result2)\n\n\ndef test_apply_function_with_indexing(warn_copy_on_write):\n # GH: 33058\n df = pd.DataFrame(\n {"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}\n )\n\n def fn(x):\n x.loc[x.index[-1], "col2"] = 0\n return x.col2\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(\n FutureWarning, match=msg, raise_on_extra_warnings=not warn_copy_on_write\n ):\n result = df.groupby(["col1"], as_index=False).apply(fn)\n expected = pd.Series(\n [1, 2, 0, 4, 5, 0],\n index=pd.MultiIndex.from_tuples(\n [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)]\n ),\n name="col2",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_mutate_columns_multiindex():\n # GH 12652\n df = pd.DataFrame(\n {\n ("C", "julian"): [1, 2, 3],\n ("B", "geoffrey"): [1, 2, 3],\n ("A", "julian"): [1, 2, 3],\n ("B", "julian"): [1, 2, 3],\n ("A", "geoffrey"): [1, 2, 3],\n ("C", "geoffrey"): [1, 2, 3],\n },\n columns=pd.MultiIndex.from_tuples(\n [\n ("A", "julian"),\n ("A", "geoffrey"),\n ("B", "julian"),\n ("B", "geoffrey"),\n ("C", "julian"),\n ("C", "geoffrey"),\n ]\n ),\n )\n\n def add_column(grouped):\n name = grouped.columns[0][1]\n grouped["sum", name] = grouped.sum(axis=1)\n return grouped\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(level=1, axis=1)\n result = gb.apply(add_column)\n expected = pd.DataFrame(\n [\n [1, 1, 1, 3, 1, 1, 1, 3],\n [2, 2, 2, 6, 2, 2, 2, 6],\n [\n 3,\n 3,\n 3,\n 9,\n 3,\n 3,\n 3,\n 9,\n ],\n ],\n columns=pd.MultiIndex.from_tuples(\n [\n ("geoffrey", "A", "geoffrey"),\n ("geoffrey", "B", "geoffrey"),\n ("geoffrey", "C", "geoffrey"),\n ("geoffrey", "sum", "geoffrey"),\n ("julian", "A", "julian"),\n 
("julian", "B", "julian"),\n ("julian", "C", "julian"),\n ("julian", "sum", "julian"),\n ]\n ),\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_apply_mutate.py | test_apply_mutate.py | Python | 5,012 | 0.95 | 0.06135 | 0.049296 | awesome-app | 669 | 2025-01-22T03:38:20.912834 | BSD-3-Clause | true | aaa38cb25dc0f75a9560f19d77c4336f |
import numpy as np\nimport pytest\n\nfrom pandas._libs import lib\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef assert_block_lengths(x):\n assert len(x) == len(x._mgr.blocks[0].mgr_locs)\n return 0\n\n\ndef cumsum_max(x):\n x.cumsum().max()\n return 0\n\n\n@pytest.mark.parametrize(\n "func",\n [\n cumsum_max,\n pytest.param(assert_block_lengths, marks=td.skip_array_manager_invalid_test),\n ],\n)\ndef test_mgr_locs_updated(func):\n # https://github.com/pandas-dev/pandas/issues/31802\n # Some operations may require creating new blocks, which requires\n # valid mgr_locs\n df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]})\n result = df.groupby(["A", "B"]).agg(func)\n expected = pd.DataFrame(\n {"C": [0, 0]},\n index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "binner,closed,expected",\n [\n (\n np.array([0, 3, 6, 9], dtype=np.int64),\n "left",\n np.array([2, 5, 6], dtype=np.int64),\n ),\n (\n np.array([0, 3, 6, 9], dtype=np.int64),\n "right",\n np.array([3, 6, 6], dtype=np.int64),\n ),\n (np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)),\n (\n np.array([0, 3, 6], dtype=np.int64),\n "right",\n np.array([3, 6], dtype=np.int64),\n ),\n ],\n)\ndef test_generate_bins(binner, closed, expected):\n values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)\n result = lib.generate_bins_dt64(values, binner, closed=closed)\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_bin_groupby.py | test_bin_groupby.py | Python | 1,769 | 0.95 | 0.061538 | 0.054545 | python-kit | 799 | 2025-02-06T05:48:11.734903 | BSD-3-Clause | true | 55fa7e4f8c4e1c635bb082f37759318b |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n qcut,\n)\nimport pandas._testing as tm\nfrom pandas.api.typing import SeriesGroupBy\nfrom pandas.tests.groupby import get_groupby_method_args\n\n\ndef cartesian_product_for_groupers(result, args, names, fill_value=np.nan):\n """Reindex to a cartesian production for the groupers,\n preserving the nature (Categorical) of each grouper\n """\n\n def f(a):\n if isinstance(a, (CategoricalIndex, Categorical)):\n categories = a.categories\n a = Categorical.from_codes(\n np.arange(len(categories)), categories=categories, ordered=a.ordered\n )\n return a\n\n index = MultiIndex.from_product(map(f, args), names=names)\n return result.reindex(index, fill_value=fill_value).sort_index()\n\n\n_results_for_groupbys_with_missing_categories = {\n # This maps the builtin groupby functions to their expected outputs for\n # missing categories when they are called on a categorical grouper with\n # observed=False. Some functions are expected to return NaN, some zero.\n # These expected values can be used across several tests (i.e. 
they are\n # the same for SeriesGroupBy and DataFrameGroupBy) but they should only be\n # hardcoded in one place.\n "all": np.nan,\n "any": np.nan,\n "count": 0,\n "corrwith": np.nan,\n "first": np.nan,\n "idxmax": np.nan,\n "idxmin": np.nan,\n "last": np.nan,\n "max": np.nan,\n "mean": np.nan,\n "median": np.nan,\n "min": np.nan,\n "nth": np.nan,\n "nunique": 0,\n "prod": np.nan,\n "quantile": np.nan,\n "sem": np.nan,\n "size": 0,\n "skew": np.nan,\n "std": np.nan,\n "sum": 0,\n "var": np.nan,\n}\n\n\n@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")\ndef test_apply_use_categorical_name(df):\n cats = qcut(df.C, 4)\n\n def get_stats(group):\n return {\n "min": group.min(),\n "max": group.max(),\n "count": group.count(),\n "mean": group.mean(),\n }\n\n result = df.groupby(cats, observed=False).D.apply(get_stats)\n assert result.index.names[0] == "C"\n\n\ndef test_basic(using_infer_string): # TODO: split this test\n cats = Categorical(\n ["a", "a", "a", "b", "b", "b", "c", "c", "c"],\n categories=["a", "b", "c", "d"],\n ordered=True,\n )\n data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})\n\n exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)\n expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)\n result = data.groupby("b", observed=False).mean()\n tm.assert_frame_equal(result, expected)\n\n cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)\n cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)\n df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})\n\n # single grouper\n gb = df.groupby("A", observed=False)\n exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)\n expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})\n result = gb.sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n # GH 8623\n x = DataFrame(\n [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. 
Doe"]],\n columns=["person_id", "person_name"],\n )\n x["person_name"] = Categorical(x.person_name)\n\n g = x.groupby(["person_id"], observed=False)\n result = g.transform(lambda x: x)\n tm.assert_frame_equal(result, x[["person_name"]])\n\n result = x.drop_duplicates("person_name")\n expected = x.iloc[[0, 1]]\n tm.assert_frame_equal(result, expected)\n\n def f(x):\n return x.drop_duplicates("person_name").iloc[0]\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = g.apply(f)\n expected = x.iloc[[0, 1]].copy()\n expected.index = Index([1, 2], name="person_id")\n dtype = "str" if using_infer_string else object\n expected["person_name"] = expected["person_name"].astype(dtype)\n tm.assert_frame_equal(result, expected)\n\n # GH 9921\n # Monotonic\n df = DataFrame({"a": [5, 15, 25]})\n c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])\n\n msg = "using SeriesGroupBy.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df["a"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]\n )\n msg = "using DataFrameGroupBy.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result = df.groupby(c, observed=False).transform(sum)\n expected = df[["a"]]\n tm.assert_frame_equal(result, expected)\n\n gbc = df.groupby(c, observed=False)\n result = gbc.transform(lambda xs: np.max(xs, axis=0))\n tm.assert_frame_equal(result, df[["a"]])\n\n result2 = gbc.transform(lambda xs: np.max(xs, axis=0))\n msg = "using DataFrameGroupBy.max"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result3 = gbc.transform(max)\n result4 = gbc.transform(np.maximum.reduce)\n result5 = gbc.transform(lambda xs: np.maximum.reduce(xs))\n tm.assert_frame_equal(result2, df[["a"]], check_dtype=False)\n 
tm.assert_frame_equal(result3, df[["a"]], check_dtype=False)\n tm.assert_frame_equal(result4, df[["a"]])\n tm.assert_frame_equal(result5, df[["a"]])\n\n # Filter\n tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])\n tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)\n\n # Non-monotonic\n df = DataFrame({"a": [5, 15, 25, -5]})\n c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])\n\n msg = "using SeriesGroupBy.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df["a"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]\n )\n msg = "using DataFrameGroupBy.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result = df.groupby(c, observed=False).transform(sum)\n expected = df[["a"]]\n tm.assert_frame_equal(result, expected)\n\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]\n )\n\n # GH 9603\n df = DataFrame({"a": [1, 0, 0, 0]})\n c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))\n result = df.groupby(c, observed=False).apply(len)\n\n exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)\n expected = Series([1, 0, 0, 0], index=exp_index)\n expected.index.name = "a"\n tm.assert_series_equal(result, expected)\n\n # more basic\n levels = ["foo", "bar", "baz", "qux"]\n codes = np.random.default_rng(2).integers(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))\n\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)\n expected = expected.reindex(exp_idx)\n\n tm.assert_frame_equal(result, 
expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = np.asarray(cats).take(idx)\n ord_data = data.take(idx)\n\n exp_cats = Categorical(\n ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]\n )\n expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()\n tm.assert_frame_equal(desc_result, expected)\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal(\n (desc_result.stack(future_stack=True).index.get_level_values(0)), exp\n )\n exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)\n tm.assert_index_equal(\n (desc_result.stack(future_stack=True).index.get_level_values(1)), exp\n )\n\n\ndef test_level_get_group(observed):\n # GH15155\n df = DataFrame(\n data=np.arange(2, 22, 2),\n index=MultiIndex(\n levels=[CategoricalIndex(["a", "b"]), range(10)],\n codes=[[0] * 5 + [1] * 5, range(10)],\n names=["Index1", "Index2"],\n ),\n )\n g = df.groupby(level=["Index1"], observed=observed)\n\n # expected should equal test.loc[["a"]]\n # GH15166\n expected = DataFrame(\n data=np.arange(2, 12, 2),\n index=MultiIndex(\n levels=[CategoricalIndex(["a", "b"]), range(5)],\n codes=[[0] * 5, range(5)],\n names=["Index1", "Index2"],\n ),\n )\n msg = "you will need to pass a length-1 tuple"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#25971 - warn when not passing a length-1 tuple\n result = g.get_group("a")\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sorting_with_different_categoricals():\n # GH 24271\n df = DataFrame(\n {\n "group": ["A"] * 6 + ["B"] * 6,\n "dose": ["high", "med", "low"] * 4,\n "outcomes": np.arange(12.0),\n }\n )\n\n df.dose = Categorical(df.dose, categories=["low", "med", "high"], ordered=True)\n\n result = df.groupby("group")["dose"].value_counts()\n result = result.sort_index(level=0, 
sort_remaining=True)\n index = ["low", "med", "high", "low", "med", "high"]\n index = Categorical(index, categories=["low", "med", "high"], ordered=True)\n index = [["A", "A", "A", "B", "B", "B"], CategoricalIndex(index)]\n index = MultiIndex.from_arrays(index, names=["group", "dose"])\n expected = Series([2] * 6, index=index, name="count")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("ordered", [True, False])\ndef test_apply(ordered):\n # GH 10138\n\n dense = Categorical(list("abc"), ordered=ordered)\n\n # 'b' is in the categories but not in the list\n missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)\n values = np.arange(len(dense))\n df = DataFrame({"missing": missing, "dense": dense, "values": values})\n grouped = df.groupby(["missing", "dense"], observed=True)\n\n # missing category 'b' should still exist in the output index\n idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])\n expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])\n\n result = grouped.apply(lambda x: np.mean(x, axis=0))\n tm.assert_frame_equal(result, expected)\n\n result = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n msg = "using DataFrameGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result = grouped.agg(np.mean)\n tm.assert_frame_equal(result, expected)\n\n # but for transform we should still get back the original index\n idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])\n expected = Series(1, index=idx)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = grouped.apply(lambda x: 1)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")\ndef test_observed(request, using_infer_string, observed):\n # multiple groupers, don't re-expand the output space\n # of 
the grouper\n # gh-14942 (implement)\n # gh-10132 (back-compat)\n # gh-8138 (back-compat)\n # gh-8869\n\n if using_infer_string and not observed:\n # TODO(infer_string) this fails with filling the string column with 0\n request.applymarker(pytest.mark.xfail(reason="TODO(infer_string)"))\n\n cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)\n cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)\n df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})\n df["C"] = ["foo", "bar"] * 2\n\n # multiple groupers with a non-cat\n gb = df.groupby(["A", "B", "C"], observed=observed)\n exp_index = MultiIndex.from_arrays(\n [cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]\n )\n expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0\n )\n\n tm.assert_frame_equal(result, expected)\n\n gb = df.groupby(["A", "B"], observed=observed)\n exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])\n expected = DataFrame(\n {"values": [1, 2, 3, 4], "C": ["foo", "bar", "foo", "bar"]}, index=exp_index\n )\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [cat1, cat2], list("AB"), fill_value=0\n )\n\n tm.assert_frame_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/issues/8138\n d = {\n "cat": Categorical(\n ["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True\n ),\n "ints": [1, 1, 2, 2],\n "val": [10, 20, 30, 40],\n }\n df = DataFrame(d)\n\n # Grouping on a single column\n groups_single_key = df.groupby("cat", observed=observed)\n result = groups_single_key.mean()\n\n exp_index = CategoricalIndex(\n list("ab"), name="cat", categories=list("abc"), ordered=True\n )\n expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)\n if not 
observed:\n index = CategoricalIndex(\n list("abc"), name="cat", categories=list("abc"), ordered=True\n )\n expected = expected.reindex(index)\n\n tm.assert_frame_equal(result, expected)\n\n # Grouping on two columns\n groups_double_key = df.groupby(["cat", "ints"], observed=observed)\n result = groups_double_key.agg("mean")\n expected = DataFrame(\n {\n "val": [10.0, 30.0, 20.0, 40.0],\n "cat": Categorical(\n ["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True\n ),\n "ints": [1, 2, 1, 2],\n }\n ).set_index(["cat", "ints"])\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [df.cat.values, [1, 2]], ["cat", "ints"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n # GH 10132\n for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:\n c, i = key\n result = groups_double_key.get_group(key)\n expected = df[(df.cat == c) & (df.ints == i)]\n tm.assert_frame_equal(result, expected)\n\n # gh-8869\n # with as_index\n d = {\n "foo": [10, 8, 4, 8, 4, 1, 1],\n "bar": [10, 20, 30, 40, 50, 60, 70],\n "baz": ["d", "c", "e", "a", "a", "d", "c"],\n }\n df = DataFrame(d)\n cat = pd.cut(df["foo"], np.linspace(0, 10, 3))\n df["range"] = cat\n groups = df.groupby(["range", "baz"], as_index=False, observed=observed)\n result = groups.agg("mean")\n\n groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)\n expected = groups2.agg("mean").reset_index()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_codes_remap(observed):\n d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}\n df = DataFrame(d)\n values = pd.cut(df["C1"], [1, 2, 3, 6])\n values.name = "cat"\n groups_double_key = df.groupby([values, "C2"], observed=observed)\n\n idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])\n expected = DataFrame(\n {"C1": [3.0, 3.0, 4.0, 5.0], "C3": [10.0, 100.0, 200.0, 34.0]}, index=idx\n )\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [values.values, [1, 2, 3, 
4]], ["cat", "C2"]\n )\n\n result = groups_double_key.agg("mean")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_perf():\n # we create a cartesian product, so this is\n # non-performant if we don't use observed values\n # gh-14942\n df = DataFrame(\n {\n "cat": np.random.default_rng(2).integers(0, 255, size=30000),\n "int_id": np.random.default_rng(2).integers(0, 255, size=30000),\n "other_id": np.random.default_rng(2).integers(0, 10000, size=30000),\n "foo": 0,\n }\n )\n df["cat"] = df.cat.astype(str).astype("category")\n\n grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)\n result = grouped.count()\n assert result.index.levels[0].nunique() == df.cat.nunique()\n assert result.index.levels[1].nunique() == df.int_id.nunique()\n assert result.index.levels[2].nunique() == df.other_id.nunique()\n\n\ndef test_observed_groups(observed):\n # gh-20583\n # test that we have the appropriate groups\n\n cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])\n df = DataFrame({"cat": cat, "vals": [1, 2, 3]})\n g = df.groupby("cat", observed=observed)\n\n result = g.groups\n if observed:\n expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}\n else:\n expected = {\n "a": Index([0, 2], dtype="int64"),\n "b": Index([], dtype="int64"),\n "c": Index([1], dtype="int64"),\n }\n\n tm.assert_dict_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "keys, expected_values, expected_index_levels",\n [\n ("a", [15, 9, 0], CategoricalIndex([1, 2, 3], name="a")),\n (\n ["a", "b"],\n [7, 8, 0, 0, 0, 9, 0, 0, 0],\n [CategoricalIndex([1, 2, 3], name="a"), Index([4, 5, 6])],\n ),\n (\n ["a", "a2"],\n [15, 0, 0, 0, 9, 0, 0, 0, 0],\n [\n CategoricalIndex([1, 2, 3], name="a"),\n CategoricalIndex([1, 2, 3], name="a"),\n ],\n ),\n ],\n)\n@pytest.mark.parametrize("test_series", [True, False])\ndef test_unobserved_in_index(keys, expected_values, expected_index_levels, test_series):\n # GH#49354 - ensure unobserved cats occur when 
grouping by index levels\n df = DataFrame(\n {\n "a": Categorical([1, 1, 2], categories=[1, 2, 3]),\n "a2": Categorical([1, 1, 2], categories=[1, 2, 3]),\n "b": [4, 5, 6],\n "c": [7, 8, 9],\n }\n ).set_index(["a", "a2"])\n if "b" not in keys:\n # Only keep b when it is used for grouping for consistent columns in the result\n df = df.drop(columns="b")\n\n gb = df.groupby(keys, observed=False)\n if test_series:\n gb = gb["c"]\n result = gb.sum()\n\n if len(keys) == 1:\n index = expected_index_levels\n else:\n codes = [[0, 0, 0, 1, 1, 1, 2, 2, 2], 3 * [0, 1, 2]]\n index = MultiIndex(\n expected_index_levels,\n codes=codes,\n names=keys,\n )\n expected = DataFrame({"c": expected_values}, index=index)\n if test_series:\n expected = expected["c"]\n tm.assert_equal(result, expected)\n\n\ndef test_observed_groups_with_nan(observed):\n # GH 24740\n df = DataFrame(\n {\n "cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),\n "vals": [1, 2, 3],\n }\n )\n g = df.groupby("cat", observed=observed)\n result = g.groups\n if observed:\n expected = {"a": Index([0, 2], dtype="int64")}\n else:\n expected = {\n "a": Index([0, 2], dtype="int64"),\n "b": Index([], dtype="int64"),\n "d": Index([], dtype="int64"),\n }\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_nth():\n # GH 26385\n cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])\n ser = Series([1, 2, 3])\n df = DataFrame({"cat": cat, "ser": ser})\n\n result = df.groupby("cat", observed=False)["ser"].nth(0)\n expected = df["ser"].iloc[[0]]\n tm.assert_series_equal(result, expected)\n\n\ndef test_dataframe_categorical_with_nan(observed):\n # GH 21151\n s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])\n s2 = Series([1, 2, 3, 4])\n df = DataFrame({"s1": s1, "s2": s2})\n result = df.groupby("s1", observed=observed).first().reset_index()\n if observed:\n expected = DataFrame(\n {"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}\n )\n else:\n expected 
= DataFrame(\n {\n "s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),\n "s2": [2, np.nan, np.nan],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("ordered", [True, False])\n@pytest.mark.parametrize("observed", [True, False])\n@pytest.mark.parametrize("sort", [True, False])\ndef test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):\n # GH 25871: Fix groupby sorting on ordered Categoricals\n # GH 25167: Groupby with observed=True doesn't sort\n\n # Build a dataframe with cat having one unobserved category ('missing'),\n # and a Series with identical values\n label = Categorical(\n ["d", "a", "b", "a", "d", "b"],\n categories=["a", "b", "missing", "d"],\n ordered=ordered,\n )\n val = Series(["d", "a", "b", "a", "d", "b"])\n df = DataFrame({"label": label, "val": val})\n\n # aggregate on the Categorical\n result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")\n\n # If ordering works, we expect index labels equal to aggregation results,\n # except for 'observed=False': label 'missing' has aggregation None\n label = Series(result.index.array, dtype="object")\n aggr = Series(result.array)\n if not observed:\n aggr[aggr.isna()] = "missing"\n if not all(label == aggr):\n msg = (\n "Labels and aggregation results not consistently sorted\n"\n f"for (ordered={ordered}, observed={observed}, sort={sort})\n"\n f"Result:\n{result}"\n )\n assert False, msg\n\n\ndef test_datetime():\n # GH9049: ensure backward compatibility\n levels = pd.date_range("2014-01-01", periods=4)\n codes = np.random.default_rng(2).integers(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n expected = expected.reindex(levels)\n expected.index = CategoricalIndex(\n expected.index, 
categories=expected.index, ordered=True\n )\n\n tm.assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = cats.take(idx)\n ord_data = data.take(idx)\n expected = ord_data.groupby(ord_labels, observed=False).describe()\n tm.assert_frame_equal(desc_result, expected)\n tm.assert_index_equal(desc_result.index, expected.index)\n tm.assert_index_equal(\n desc_result.index.get_level_values(0), expected.index.get_level_values(0)\n )\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal(\n (desc_result.stack(future_stack=True).index.get_level_values(0)), exp\n )\n exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)\n tm.assert_index_equal(\n (desc_result.stack(future_stack=True).index.get_level_values(1)), exp\n )\n\n\ndef test_categorical_index():\n s = np.random.default_rng(2)\n levels = ["foo", "bar", "baz", "qux"]\n codes = s.integers(0, 4, size=20)\n cats = Categorical.from_codes(codes, levels, ordered=True)\n df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))\n df["cats"] = cats\n\n # with a cat index\n result = df.set_index("cats").groupby(level=0, observed=False).sum()\n expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"\n )\n tm.assert_frame_equal(result, expected)\n\n # with a cat column, should produce a cat index\n result = df.groupby("cats", observed=False).sum()\n expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_describe_categorical_columns():\n # GH 11558\n cats = CategoricalIndex(\n 
["qux", "foo", "baz", "bar"],\n categories=["foo", "bar", "baz", "qux"],\n ordered=True,\n )\n df = DataFrame(np.random.default_rng(2).standard_normal((20, 4)), columns=cats)\n result = df.groupby([1, 2, 3, 4] * 5).describe()\n\n tm.assert_index_equal(result.stack(future_stack=True).columns, cats)\n tm.assert_categorical_equal(\n result.stack(future_stack=True).columns.values, cats.values\n )\n\n\ndef test_unstack_categorical():\n # GH11558 (example is taken from the original issue)\n df = DataFrame(\n {"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}\n )\n df["medium"] = df["medium"].astype("category")\n\n gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()\n result = gcat.describe()\n\n exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")\n tm.assert_index_equal(result.columns, exp_columns)\n tm.assert_categorical_equal(result.columns.values, exp_columns.values)\n\n result = gcat["A"] + gcat["B"]\n expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_bins_unequal_len():\n # GH3011\n series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n bins = pd.cut(series.dropna().values, 4)\n\n # len(bins) != len(series) here\n with pytest.raises(ValueError, match="Grouper and axis must be same length"):\n series.groupby(bins).mean()\n\n\n@pytest.mark.parametrize(\n ["series", "data"],\n [\n # Group a series with length and index equal to those of the grouper.\n (Series(range(4)), {"A": [0, 3], "B": [1, 2]}),\n # Group a series with length equal to that of the grouper and index unequal to\n # that of the grouper.\n (Series(range(4)).rename(lambda idx: idx + 1), {"A": [2], "B": [0, 1]}),\n # GH44179: Group a series with length unequal to that of the grouper.\n (Series(range(7)), {"A": [0, 3], "B": [1, 2]}),\n ],\n)\ndef test_categorical_series(series, data):\n # Group the given series by a series with categorical data type such that 
group A\n # takes indices 0 and 3 and group B indices 1 and 2, obtaining the values mapped in\n # the given data.\n groupby = series.groupby(Series(list("ABBA"), dtype="category"), observed=False)\n result = groupby.aggregate(list)\n expected = Series(data, index=CategoricalIndex(data.keys()))\n tm.assert_series_equal(result, expected)\n\n\ndef test_as_index():\n # GH13204\n df = DataFrame(\n {\n "cat": Categorical([1, 2, 2], [1, 2, 3]),\n "A": [10, 11, 11],\n "B": [101, 102, 103],\n }\n )\n result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n "cat": Categorical([1, 2], categories=df.cat.cat.categories),\n "A": [10, 11],\n "B": [101, 205],\n },\n columns=["cat", "A", "B"],\n )\n tm.assert_frame_equal(result, expected)\n\n # function grouper\n f = lambda r: df.loc[r, "A"]\n msg = "A grouping .* was excluded from the result"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby(["cat", f], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n "cat": Categorical([1, 2], categories=df.cat.cat.categories),\n "A": [10, 22],\n "B": [101, 205],\n },\n columns=["cat", "A", "B"],\n )\n tm.assert_frame_equal(result, expected)\n\n # another not in-axis grouper (conflicting names in index)\n s = Series(["a", "b", "b"], name="cat")\n msg = "A grouping .* was excluded from the result"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby(["cat", s], as_index=False, observed=True).sum()\n tm.assert_frame_equal(result, expected)\n\n # is original index dropped?\n group_columns = ["cat", "A"]\n expected = DataFrame(\n {\n "cat": Categorical([1, 2], categories=df.cat.cat.categories),\n "A": [10, 11],\n "B": [101, 205],\n },\n columns=["cat", "A", "B"],\n )\n\n for name in [None, "X", "B"]:\n df.index = Index(list("abc"), name=name)\n result = df.groupby(group_columns, as_index=False, observed=True).sum()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_preserve_categories():\n # GH-13179\n categories = list("abc")\n\n # ordered=True\n df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})\n sort_index = CategoricalIndex(categories, categories, ordered=True, name="A")\n nosort_index = CategoricalIndex(list("bac"), categories, ordered=True, name="A")\n tm.assert_index_equal(\n df.groupby("A", sort=True, observed=False).first().index, sort_index\n )\n # GH#42482 - don't sort result when sort=False, even when ordered=True\n tm.assert_index_equal(\n df.groupby("A", sort=False, observed=False).first().index, nosort_index\n )\n\n # ordered=False\n df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})\n sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")\n # GH#48749 - don't change order of categories\n # GH#42482 - don't sort result when sort=False, even when ordered=True\n nosort_index = CategoricalIndex(list("bac"), list("abc"), ordered=False, name="A")\n tm.assert_index_equal(\n df.groupby("A", sort=True, observed=False).first().index, sort_index\n )\n tm.assert_index_equal(\n df.groupby("A", sort=False, observed=False).first().index, nosort_index\n )\n\n\ndef test_preserve_categorical_dtype():\n # GH13743, GH13854\n df = DataFrame(\n {\n "A": [1, 2, 1, 1, 2],\n "B": [10, 16, 22, 28, 34],\n "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),\n "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),\n }\n )\n # single grouper\n exp_full = DataFrame(\n {\n "A": [2.0, 1.0, np.nan],\n "B": [25.0, 20.0, np.nan],\n "C1": Categorical(list("bac"), categories=list("bac"), ordered=False),\n "C2": Categorical(list("bac"), categories=list("bac"), ordered=True),\n }\n )\n for col in ["C1", "C2"]:\n result1 = df.groupby(by=col, as_index=False, observed=False).mean(\n numeric_only=True\n )\n result2 = (\n df.groupby(by=col, as_index=True, observed=False)\n .mean(numeric_only=True)\n .reset_index()\n )\n 
expected = exp_full.reindex(columns=result1.columns)\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n\n\n@pytest.mark.parametrize(\n "func, values",\n [\n ("first", ["second", "first"]),\n ("last", ["fourth", "third"]),\n ("min", ["fourth", "first"]),\n ("max", ["second", "third"]),\n ],\n)\ndef test_preserve_on_ordered_ops(func, values):\n # gh-18502\n # preserve the categoricals on ops\n c = Categorical(["first", "second", "third", "fourth"], ordered=True)\n df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})\n g = df.groupby("payload")\n result = getattr(g, func)()\n expected = DataFrame(\n {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}\n ).set_index("payload")\n tm.assert_frame_equal(result, expected)\n\n # we should also preserve categorical for SeriesGroupBy\n sgb = df.groupby("payload")["col"]\n result = getattr(sgb, func)()\n expected = expected["col"]\n tm.assert_series_equal(result, expected)\n\n\ndef test_categorical_no_compress():\n data = Series(np.random.default_rng(2).standard_normal(9))\n\n codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean()\n\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n tm.assert_series_equal(result, exp)\n\n codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])\n cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n tm.assert_series_equal(result, exp)\n\n cats = Categorical(\n ["a", "a", "a", "b", "b", "b", "c", "c", "c"],\n categories=["a", "b", "c", "d"],\n ordered=True,\n )\n data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], 
"b": cats})\n\n result = data.groupby("b", observed=False).mean()\n result = result["a"].values\n exp = np.array([1, 2, 4, np.nan])\n tm.assert_numpy_array_equal(result, exp)\n\n\ndef test_groupby_empty_with_category():\n # GH-9614\n # test fix for when group by on None resulted in\n # coercion of dtype categorical -> float\n df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})\n result = df.groupby("A").first()["B"]\n expected = Series(\n Categorical([], categories=["test", "train"]),\n index=Series([], dtype="object", name="A"),\n name="B",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_sort():\n # https://stackoverflow.com/questions/23814368/sorting-pandas-\n # categorical-labels-after-groupby\n # This should result in a properly sorted Series so that the plot\n # has a sorted x axis\n # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')\n\n df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)})\n labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=["value"], ascending=True)\n df["value_group"] = pd.cut(\n df.value, range(0, 10500, 500), right=False, labels=cat_labels\n )\n\n res = df.groupby(["value_group"], observed=False)["value_group"].count()\n exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]\n exp.index = CategoricalIndex(exp.index, name=exp.index.name)\n tm.assert_series_equal(res, exp)\n\n\n@pytest.mark.parametrize("ordered", [True, False])\ndef test_sort2(sort, ordered):\n # dataframe groupby sort was being ignored # GH 8868\n # GH#48749 - don't change order of categories\n # GH#42482 - don't sort result when sort=False, even when ordered=True\n df = DataFrame(\n [\n ["(7.5, 10]", 10, 10],\n ["(7.5, 10]", 8, 20],\n ["(2.5, 5]", 5, 30],\n ["(5, 7.5]", 6, 40],\n ["(2.5, 5]", 4, 50],\n ["(0, 2.5]", 1, 60],\n ["(5, 7.5]", 7, 70],\n ],\n columns=["range", "foo", "bar"],\n )\n 
df["range"] = Categorical(df["range"], ordered=ordered)\n result = df.groupby("range", sort=sort, observed=False).first()\n\n if sort:\n data_values = [[1, 60], [5, 30], [6, 40], [10, 10]]\n index_values = ["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"]\n else:\n data_values = [[10, 10], [5, 30], [6, 40], [1, 60]]\n index_values = ["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"]\n expected = DataFrame(\n data_values,\n columns=["foo", "bar"],\n index=CategoricalIndex(index_values, name="range", ordered=ordered),\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("ordered", [True, False])\ndef test_sort_datetimelike(sort, ordered):\n # GH10505\n # GH#42482 - don't sort result when sort=False, even when ordered=True\n\n # use same data as test_groupby_sort_categorical, which category is\n # corresponding to datetime.month\n df = DataFrame(\n {\n "dt": [\n datetime(2011, 7, 1),\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 2, 1),\n datetime(2011, 1, 1),\n datetime(2011, 5, 1),\n ],\n "foo": [10, 8, 5, 6, 4, 1, 7],\n "bar": [10, 20, 30, 40, 50, 60, 70],\n },\n columns=["dt", "foo", "bar"],\n )\n\n # ordered=True\n df["dt"] = Categorical(df["dt"], ordered=ordered)\n if sort:\n data_values = [[1, 60], [5, 30], [6, 40], [10, 10]]\n index_values = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n else:\n data_values = [[10, 10], [5, 30], [6, 40], [1, 60]]\n index_values = [\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n expected = DataFrame(\n data_values,\n columns=["foo", "bar"],\n index=CategoricalIndex(index_values, name="dt", ordered=ordered),\n )\n result = df.groupby("dt", sort=sort, observed=False).first()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_sum():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {"A": Categorical(["a", "a", "b"], 
categories=["a", "b", "c"]), "B": [1, 2, 1]}\n )\n expected_idx = CategoricalIndex(["a", "b", "c"], name="A")\n\n # 0 by default\n result = df.groupby("A", observed=False).B.sum()\n expected = Series([3, 1, 0], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby("A", observed=False).B.sum(min_count=0)\n expected = Series([3, 1, 0], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby("A", observed=False).B.sum(min_count=1)\n expected = Series([3, 1, np.nan], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n # min_count>1\n result = df.groupby("A", observed=False).B.sum(min_count=2)\n expected = Series([3, np.nan, np.nan], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_prod():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}\n )\n\n expected_idx = CategoricalIndex(["a", "b", "c"], name="A")\n\n # 1 by default\n result = df.groupby("A", observed=False).B.prod()\n expected = Series([2, 1, 1], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby("A", observed=False).B.prod(min_count=0)\n expected = Series([2, 1, 1], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby("A", observed=False).B.prod(min_count=1)\n expected = Series([2, 1, np.nan], expected_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_multiindex_categorical_datetime():\n # https://github.com/pandas-dev/pandas/issues/21390\n\n df = DataFrame(\n {\n "key1": Categorical(list("abcbabcba")),\n "key2": Categorical(\n list(pd.date_range("2018-06-01 00", freq="1min", periods=3)) * 3\n ),\n "values": np.arange(9),\n }\n )\n result = df.groupby(["key1", "key2"], observed=False).mean()\n\n idx = 
MultiIndex.from_product(\n [\n Categorical(["a", "b", "c"]),\n Categorical(pd.date_range("2018-06-01 00", freq="1min", periods=3)),\n ],\n names=["key1", "key2"],\n )\n expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "as_index, expected",\n [\n (\n True,\n Series(\n index=MultiIndex.from_arrays(\n [Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]\n ),\n data=[1, 2, 3],\n name="x",\n ),\n ),\n (\n False,\n DataFrame(\n {\n "a": Series([1, 1, 2], dtype="category"),\n "b": [1, 2, 2],\n "x": [1, 2, 3],\n }\n ),\n ),\n ],\n)\ndef test_groupby_agg_observed_true_single_column(as_index, expected):\n # GH-23970\n df = DataFrame(\n {"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}\n )\n\n result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])\ndef test_shift(fill_value):\n ct = Categorical(\n ["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False\n )\n expected = Categorical(\n [None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False\n )\n res = ct.shift(1, fill_value=fill_value)\n tm.assert_equal(res, expected)\n\n\n@pytest.fixture\ndef df_cat(df):\n """\n DataFrame with multiple categorical columns and a column of integers.\n Shortened so as not to contain all possible combinations of categories.\n Useful for testing `observed` kwarg functionality on GroupBy objects.\n\n Parameters\n ----------\n df: DataFrame\n Non-categorical, longer DataFrame from another fixture, used to derive\n this one\n\n Returns\n -------\n df_cat: DataFrame\n """\n df_cat = df.copy()[:4] # leave out some groups\n df_cat["A"] = df_cat["A"].astype("category")\n df_cat["B"] = df_cat["B"].astype("category")\n df_cat["C"] = Series([1, 2, 3, 4])\n df_cat = df_cat.drop(["D"], axis=1)\n return 
df_cat


# ---------------------------------------------------------------------------
# observed=True/False/None behavior for SeriesGroupBy reductions over the
# shared ``df_cat`` fixture (two categorical group keys "A" and "B").
# NOTE(review): the FutureWarning message strings matched below are
# pandas-version-specific (GH#53425 deprecation of builtin callables).
# ---------------------------------------------------------------------------
@pytest.mark.parametrize("operation", ["agg", "apply"])
def test_seriesgroupby_observed_true(df_cat, operation):
    # GH#24880
    # GH#49223 - order of results was wrong when grouping by index levels
    lev_a = Index(["bar", "bar", "foo", "foo"], dtype=df_cat["A"].dtype, name="A")
    lev_b = Index(["one", "three", "one", "two"], dtype=df_cat["B"].dtype, name="B")
    index = MultiIndex.from_arrays([lev_a, lev_b])
    expected = Series(data=[2, 4, 1, 3], index=index, name="C").sort_index()

    grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
    msg = "using np.sum" if operation == "apply" else "using SeriesGroupBy.sum"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        # GH#53425
        result = getattr(grouped, operation)(sum)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
    # GH 24880
    # GH#49223 - order of results was wrong when grouping by index levels
    # observed=False/None keeps unobserved category combinations in the result
    # (as NaN for apply; filled to 0 for agg below).
    index, _ = MultiIndex.from_product(
        [
            CategoricalIndex(["bar", "foo"], ordered=False),
            CategoricalIndex(["one", "three", "two"], ordered=False),
        ],
        names=["A", "B"],
    ).sortlevel()

    expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
    if operation == "agg":
        msg = "The 'downcast' keyword in fillna is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            expected = expected.fillna(0, downcast="infer")
    grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
    msg = "using SeriesGroupBy.sum" if operation == "agg" else "using np.sum"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        # GH#53425
        result = getattr(grouped, operation)(sum)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "observed, index, data",
    [
        (
            True,
            MultiIndex.from_arrays(
                [
                    Index(["bar"] * 4 + ["foo"] * 4, dtype="category", name="A"),
                    Index(
                        ["one", "one", "three", "three", "one", "one", "two", "two"],
                        dtype="category",
                        name="B",
                    ),
                    Index(["min", "max"] * 4),
                ]
            ),
            [2, 2, 4, 4, 1, 1, 3, 3],
        ),
        (
            False,
            MultiIndex.from_product(
                [
                    CategoricalIndex(["bar", "foo"], ordered=False),
                    CategoricalIndex(["one", "three", "two"], ordered=False),
                    Index(["min", "max"]),
                ],
                names=["A", "B", None],
            ),
            [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
        ),
        (
            None,
            MultiIndex.from_product(
                [
                    CategoricalIndex(["bar", "foo"], ordered=False),
                    CategoricalIndex(["one", "three", "two"], ordered=False),
                    Index(["min", "max"]),
                ],
                names=["A", "B", None],
            ),
            [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
        ),
    ],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
    # GH 24880
    # apply returning a dict expands into an extra ("min"/"max") index level.
    expected = Series(data=data, index=index, name="C")
    result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
        lambda x: {"min": x.min(), "max": x.max()}
    )
    tm.assert_series_equal(result, expected)


def test_groupby_categorical_series_dataframe_consistent(df_cat):
    # GH 20416
    # selecting the column before vs. after the reduction must agree
    expected = df_cat.groupby(["A", "B"], observed=False)["C"].mean()
    result = df_cat.groupby(["A", "B"], observed=False).mean()["C"]
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
    # GH 13420
    # axis=1 grouping by a Categorical must match transposed axis=0 grouping;
    # both spellings are deprecated, hence the two warning assertions.
    df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
    cat = Categorical.from_codes(code, categories=list("abc"))
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby(cat, axis=1, observed=False)
    result = gb.mean()
    msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb2 = df.T.groupby(cat, axis=0, observed=False)
    expected = gb2.mean().T
    tm.assert_frame_equal(result, expected)


def test_groupby_cat_preserves_structure(observed, ordered):
    # GH 28787
    # groupby/agg/reset_index round-trip must reproduce the original frame
    df = DataFrame(
        {"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
        columns=["Name", "Item"],
    )
    expected = df.copy()

    result = (
        df.groupby("Name", observed=observed)
        .agg(DataFrame.sum, skipna=True)
        .reset_index()
    )

    tm.assert_frame_equal(result, expected)


def test_get_nonexistent_category():
    # Accessing a Category that is not in the dataframe
    df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
    with pytest.raises(KeyError, match="'vau'"):
        df.groupby("var").apply(
            lambda rows: DataFrame(
                {"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
            )
        )


def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed):
    # GH 17605
    # With two 4-category keys and only 4 observed combinations, the result
    # length is 4 when observed=True and 4*4=16 when observed=False.
    if reduction_func == "ngroup":
        pytest.skip("ngroup is not truly a reduction")

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABCD")),
            "cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
            "value": [0.1] * 4,
        }
    )
    args = get_groupby_method_args(reduction_func, df)

    expected_length = 4 if observed else 16

    series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]

    if reduction_func == "corrwith":
        # TODO: implemented SeriesGroupBy.corrwith. See GH 32293
        assert not hasattr(series_groupby, reduction_func)
        return

    agg = getattr(series_groupby, reduction_func)

    if not observed and reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            agg(*args)
        return

    result = agg(*args)

    assert len(result) == expected_length


def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
    reduction_func, request
):
    # GH 17605
    # Tests whether the unobserved categories in the result contain 0 or NaN

    if reduction_func == "ngroup":
        pytest.skip("ngroup is not truly a reduction")

    if reduction_func == "corrwith":  # GH 32293
        mark = pytest.mark.xfail(
            reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
        )
        request.applymarker(mark)

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
            "value": [0.1] * 4,
        }
    )
    unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
    args = get_groupby_method_args(reduction_func, df)

    series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
    agg = getattr(series_groupby, reduction_func)

    if reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            agg(*args)
        return

    result = agg(*args)

    # per-reduction expected fill value (0 or NaN), defined at module level
    zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]

    for idx in unobserved:
        val = result.loc[idx]
        assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)

    # If we expect unobserved values to be zero, we also expect the dtype to be int.
    # Except for .sum(). If the observed categories sum to dtype=float (i.e. their
    # sums have decimals), then the zeros for the missing categories should also be
    # floats.
    if zero_or_nan == 0 and reduction_func != "sum":
        assert np.issubdtype(result.dtype, np.integer)


def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
    # GH 23865
    # GH 27075
    # Ensure that df.groupby, when 'by' is two Categorical variables,
    # does not return the categories that are not in df when observed=True
    if reduction_func == "ngroup":
        pytest.skip("ngroup does not return the Categories on the index")

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("1111"), categories=list("12")),
            "value": [0.1, 0.1, 0.1, 0.1],
        }
    )
    unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]

    df_grp = df.groupby(["cat_1", "cat_2"], observed=True)

    args = get_groupby_method_args(reduction_func, df)
    res = getattr(df_grp, reduction_func)(*args)

    for cat in unobserved_cats:
        assert cat not in res.index


@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
    reduction_func, observed
):
    # GH 23865
    # GH 27075
    # Ensure that df.groupby, when 'by' is two Categorical variables,
    # returns the categories that are not in df when observed=False/None

    if reduction_func == "ngroup":
        pytest.skip("ngroup does not return the Categories on the index")

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("1111"), categories=list("12")),
            "value": [0.1, 0.1, 0.1, 0.1],
        }
    )
    unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]

    df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)

    args = get_groupby_method_args(reduction_func, df)

    if not observed and reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            getattr(df_grp, reduction_func)(*args)
        return

    res = getattr(df_grp, reduction_func)(*args)

    expected = _results_for_groupbys_with_missing_categories[reduction_func]

    if expected is np.nan:
        assert res.loc[unobserved_cats].isnull().all().all()
    else:
        assert (res.loc[unobserved_cats] == expected).all().all()


@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")
def test_series_groupby_categorical_aggregation_getitem():
    # GH 8870
    # column selection before vs. after agg must agree when a key is an
    # interval Categorical produced by pd.cut
    d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
    df = DataFrame(d)
    cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
    df["range"] = cat
    groups = df.groupby(["range", "baz"], as_index=True, sort=True, observed=False)
    result = groups["foo"].agg("mean")
    expected = groups.agg("mean")["foo"]
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "func, expected_values",
    [(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
    # 31256
    df = DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "groups": [0, 1, 1, 2, 2],
            "value": Categorical([0, 0, 0, 0, 1]),
        }
    ).set_index("id")
    result = df.groupby("groups").agg(func)

    expected = DataFrame(
        {"value": expected_values}, index=Index([0, 1, 2], name="groups")
    )
    tm.assert_frame_equal(result, expected)


def test_groupby_agg_non_numeric():
    # nunique over a categorical column, grouped by an external key array;
    # agg(Series.nunique) and .nunique() must agree
    df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
    expected = DataFrame({"A": [2, 1]}, index=np.array([1, 2]))

    result = df.groupby([1, 2, 1]).agg(Series.nunique)
    tm.assert_frame_equal(result, expected)

    result = df.groupby([1, 2, 1]).nunique()
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("func", ["first", "last"])
def test_groupby_first_returned_categorical_instead_of_dataframe(func):
    # GH 28641: groupby drops index, when grouping over categorical column with
    # first/last. Renamed Categorical instead of DataFrame previously.
    df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()})
    df_grouped = df.groupby("A")["B"]
    result = getattr(df_grouped, func)()

    # ordered categorical dtype should be preserved
    expected = Series(
        ["b"], index=Index([1997], name="A"), name="B", dtype=df["B"].dtype
    )
    tm.assert_series_equal(result, expected)


def test_read_only_category_no_sort():
    # GH33410
    # grouping must not attempt to write into a read-only categories array
    cats = np.array([1, 2])
    cats.flags.writeable = False
    df = DataFrame(
        {"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))}
    )
    expected = DataFrame(data={"a": [2.0, 6.0]}, index=CategoricalIndex(cats, name="b"))
    result = df.groupby("b", sort=False, observed=False).mean()
    tm.assert_frame_equal(result, expected)


def test_sorted_missing_category_values():
    # GH 28597
    # unused category "tiny" must appear (with zero counts) in category order
    df = DataFrame(
        {
            "foo": [
                "small",
                "large",
                "large",
                "large",
                "medium",
                "large",
                "large",
                "medium",
            ],
            "bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
        }
    )
    df["foo"] = (
        df["foo"]
        .astype("category")
        .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
    )

    expected = DataFrame(
        {
            "tiny": {"A": 0, "C": 0},
            "small": {"A": 0, "C": 1},
            "medium": {"A": 1, "C": 1},
            "large": {"A": 3, "C": 2},
        }
    )
    expected = expected.rename_axis("bar", axis="index")
    expected.columns = CategoricalIndex(
        ["tiny", "small", "medium", "large"],
        categories=["tiny", "small", "medium", "large"],
        ordered=True,
        name="foo",
        dtype="category",
    )

    result = df.groupby(["bar", "foo"], observed=False).size().unstack()

    tm.assert_frame_equal(result, expected)


def test_agg_cython_category_not_implemented_fallback():
    # https://github.com/pandas-dev/pandas/issues/31450
    df = DataFrame({"col_num": [1, 1, 2, 3]})
    df["col_cat"] = df["col_num"].astype("category")

    result = df.groupby("col_num").col_cat.first()

    # ordered categorical dtype should definitely be preserved;
    # this is unordered, so is less-clear case (if anything, it should raise)
    expected = Series(
        [1, 2, 3],
        index=Index([1, 2, 3], name="col_num"),
        name="col_cat",
        dtype=df["col_cat"].dtype,
    )
    tm.assert_series_equal(result, expected)

    result = df.groupby("col_num").agg({"col_cat": "first"})
    expected = expected.to_frame()
    tm.assert_frame_equal(result, expected)


def test_aggregate_categorical_with_isnan():
    # GH 29837
    # lambda aggregation counting NaNs must work across numeric, object and
    # categorical columns alike
    df = DataFrame(
        {
            "A": [1, 1, 1, 1],
            "B": [1, 2, 1, 2],
            "numerical_col": [0.1, 0.2, np.nan, 0.3],
            "object_col": ["foo", "bar", "foo", "fee"],
            "categorical_col": ["foo", "bar", "foo", "fee"],
        }
    )

    df = df.astype({"categorical_col": "category"})

    result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
    index = MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
    expected = DataFrame(
        data={
            "numerical_col": [1, 0],
            "object_col": [0, 0],
            "categorical_col": [0, 0],
        },
        index=index,
    )
    tm.assert_frame_equal(result, expected)


def test_categorical_transform():
    # GH 29037
    df = DataFrame(
        {
            "package_id": [1, 1, 1, 2, 2, 3],
            "status": [
                "Waiting",
                "OnTheWay",
                "Delivered",
                "Waiting",
                "OnTheWay",
                "Waiting",
            ],
        }
    )

    delivery_status_type = pd.CategoricalDtype(
        categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
    )
    df["status"] = df["status"].astype(delivery_status_type)
    msg = "using SeriesGroupBy.max"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        # GH#53425
        df["last_status"] = df.groupby("package_id")["status"].transform(max)
    result = df.copy()

    expected = DataFrame(
        {
            "package_id": [1, 1, 1, 2, 2, 3],
            "status": [
                "Waiting",
                "OnTheWay",
                "Delivered",
                "Waiting",
                "OnTheWay",
                "Waiting",
            ],
            "last_status": [
                "Delivered",
                "Delivered",
                "Delivered",
                "OnTheWay",
                "OnTheWay",
                "Waiting",
            ],
        }
    )

    expected["status"] = expected["status"].astype(delivery_status_type)

    # .transform(max) should preserve ordered categoricals
    expected["last_status"] = expected["last_status"].astype(delivery_status_type)

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("func", ["first", "last"])
def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
    func: str, observed: bool
):
    # GH 34951
    cat = Categorical([0, 0, 1, 1])
    val = [0, 1, 1, 0]
    df = DataFrame({"a": cat, "b": cat, "c": val})

    cat2 = Categorical([0, 1])
    idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"])
    expected_dict = {
        "first": Series([0, np.nan, np.nan, 1], idx, name="c"),
        "last": Series([1, np.nan, np.nan, 0], idx, name="c"),
    }

    expected = expected_dict[func]
    if observed:
        # with observed=True the unobserved (NaN) rows drop and dtype is int
        expected = expected.dropna().astype(np.int64)

    srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
    result = getattr(srs_grp, func)()
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("func", ["first", "last"])
def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
    func: str, observed: bool
):
    # GH 34951
    # DataFrame variant of the test above; expected is the same Series as a frame
    cat = Categorical([0, 0, 1, 1])
    val = [0, 1, 1, 0]
    df = DataFrame({"a": cat, "b": cat, "c": val})

    cat2 = Categorical([0, 1])
    idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"])
    expected_dict = {
        "first": Series([0, np.nan, np.nan, 1], idx, name="c"),
        "last": Series([1, np.nan, np.nan, 0], idx, name="c"),
    }

    expected = expected_dict[func].to_frame()
    if observed:
        expected = expected.dropna().astype(np.int64)

    df_grp = df.groupby(["a", "b"], observed=observed)
    result = getattr(df_grp, func)()
    tm.assert_frame_equal(result, expected)


def test_groupby_categorical_indices_unused_categories():
    # GH#38642
    # .indices must include unused category "c" with an empty position array
    df = DataFrame(
        {
            "key": Categorical(["b", "b", "a"], categories=["a", "b", "c"]),
            "col": range(3),
        }
    )
    grouped = df.groupby("key", sort=False, observed=False)
    result = grouped.indices
    expected = {
        "b": np.array([0, 1], dtype="intp"),
        "a": np.array([2], dtype="intp"),
        "c": np.array([], dtype="intp"),
    }
    assert result.keys() == expected.keys()
    for key in result.keys():
        tm.assert_numpy_array_equal(result[key], expected[key])


@pytest.mark.parametrize("func", ["first", "last"])
def test_groupby_last_first_preserve_categoricaldtype(func):
    # GH#33090
    df = DataFrame({"a": [1, 2, 3]})
    df["b"] = df["a"].astype("category")
    result = getattr(df.groupby("a")["b"], func)()
    expected = Series(
        Categorical([1, 2, 3]), name="b", index=Index([1, 2, 3], name="a")
    )
    tm.assert_series_equal(expected, result)


def test_groupby_categorical_observed_nunique():
    # GH#45128
    df = DataFrame({"a": [1, 2], "b": [1, 2], "c": [10, 11]})
    df = df.astype(dtype={"a": "category", "b": "category"})
    result = df.groupby(["a", "b"], observed=True).nunique()["c"]
    expected = Series(
        [1, 1],
        index=MultiIndex.from_arrays(
            [CategoricalIndex([1, 2], name="a"), CategoricalIndex([1, 2], name="b")]
        ),
        name="c",
    )
    tm.assert_series_equal(result, expected)


def test_groupby_categorical_aggregate_functions():
    # GH#37275
    # max over an ordered categorical must respect category order, not codes
    dtype = pd.CategoricalDtype(categories=["small", "big"], ordered=True)
    df = DataFrame(
        [[1, "small"], [1, "big"], [2, "small"]], columns=["grp", "description"]
    ).astype({"description": dtype})

    result = df.groupby("grp")["description"].max()
    expected = Series(
        ["big", "small"],
        index=Index([1, 2], name="grp"),
        name="description",
        dtype=pd.CategoricalDtype(categories=["small", "big"], ordered=True),
    )

    tm.assert_series_equal(result, expected)


def test_groupby_categorical_dropna(observed, dropna):
    # GH#48645 - dropna should have no impact on the result when there are no NA values
    cat = Categorical([1, 2], categories=[1, 2, 3])
    df = DataFrame({"x": Categorical([1, 2], categories=[1, 2, 3]), "y": [3, 4]})
    gb = df.groupby("x", observed=observed, dropna=dropna)
    result = gb.sum()

    if observed:
        expected = DataFrame({"y": [3, 4]}, index=cat)
    else:
        index = CategoricalIndex([1, 2, 3], [1, 2, 3])
        expected = DataFrame({"y": [3, 4, 0]}, index=index)
    expected.index.name = "x"

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_reducer(
    request, as_index, sort, observed, reduction_func, index_kind, ordered
):
    # GH#48749
    # reductions must preserve the (non-sorted) category order [1, 4, 3, 2]
    if reduction_func == "corrwith" and not as_index:
        msg = "GH#49950 - corrwith with as_index=False may not have grouping column"
        request.applymarker(pytest.mark.xfail(reason=msg))
    elif index_kind != "range" and not as_index:
        pytest.skip(reason="Result doesn't have categories, nothing to test")
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    args = get_groupby_method_args(reduction_func, df)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)

    if not observed and reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            getattr(gb, reduction_func)(*args)
        return

    op_result = getattr(gb, reduction_func)(*args)
    if as_index:
        result = op_result.index.get_level_values("a").categories
    else:
        result = op_result["a"].cat.categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)


@pytest.mark.parametrize("index_kind", ["single", "multi"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_transformer(
    as_index, sort, observed, transformation_func, index_kind, ordered
):
    # GH#48749
    # transformations must also preserve the category order [1, 4, 3, 2]
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    args = get_groupby_method_args(transformation_func, df)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
    warn = FutureWarning if transformation_func == "fillna" else None
    msg = "DataFrameGroupBy.fillna is deprecated"
    with tm.assert_produces_warning(warn, match=msg):
        op_result = getattr(gb, transformation_func)(*args)
    result = op_result.index.get_level_values("a").categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)


@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
@pytest.mark.parametrize("method", ["head", "tail"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_head_tail(
    as_index, sort, observed, method, index_kind, ordered
):
    # GH#48749
    # head/tail (filters) must preserve the category order [1, 4, 3, 2]
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
    op_result = getattr(gb, method)()
    if index_kind == "range":
        result = op_result["a"].cat.categories
    else:
        result = op_result.index.get_level_values("a").categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)


@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_apply(as_index, sort, observed, method, index_kind, ordered):
    # GH#48749
    # apply/agg/transform with a UDF must preserve category order [1, 4, 3, 2]
    if (method == "transform" and index_kind == "range") or (
        not as_index and index_kind != "range"
    ):
        pytest.skip("No categories in result, nothing to test")
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
    warn = FutureWarning if method == "apply" and index_kind == "range" else None
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(warn, match=msg):
        op_result = getattr(gb, method)(lambda x: x.sum(numeric_only=True))
    if (method == "transform" or not as_index) and index_kind == "range":
        result = op_result["a"].cat.categories
    else:
        result = op_result.index.get_level_values("a").categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)


@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
def test_many_categories(as_index, sort, index_kind, ordered):
    # GH#48749 - Test when the grouper has many categories
    if index_kind != "range" and not as_index:
        pytest.skip(reason="Result doesn't have categories, nothing to test")
    categories = np.arange(9999, -1, -1)
    grouper = Categorical([2, 1, 2, 3], categories=categories, ordered=ordered)
    df = DataFrame({"a": grouper, "b": range(4)})
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=True)
    result = gb.sum()

    # Test is setup so that data and index are the same values
    data = [3, 2, 1] if sort else [2, 1, 3]

    index = CategoricalIndex(
        data, categories=grouper.categories, ordered=ordered, name="a"
    )
    if as_index:
        expected = DataFrame({"b": data})
        if index_kind == "multi":
            expected.index = MultiIndex.from_frame(DataFrame({"a": index, "a2": index}))
        else:
            expected.index = index
    elif index_kind == "multi":
        expected = DataFrame({"a": Series(index), "a2": Series(index), "b": data})
    else:
        expected = DataFrame({"a": Series(index), "b": data})

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("cat_columns", ["a", "b", ["a", "b"]])
@pytest.mark.parametrize("keys", ["a", "b", ["a", "b"]])
def test_groupby_default_depr(cat_columns, keys):
    # GH#43999
    # deprecation warning fires only when a categorical column is a group key
    df = DataFrame({"a": [1, 1, 2, 3], "b": [4, 5, 6, 7]})
    df[cat_columns] = df[cat_columns].astype("category")
    msg = "The default of observed=False is deprecated"
    klass = FutureWarning if set(cat_columns) & set(keys) else None
    with tm.assert_produces_warning(klass, match=msg):
        df.groupby(keys)


@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
def test_agg_list(request, as_index, observed, reduction_func, test_series, keys):
    # GH#52760
    # agg with a single-element list must match calling the reduction directly
    if test_series and reduction_func == "corrwith":
        assert not hasattr(SeriesGroupBy, "corrwith")
        pytest.skip("corrwith not implemented for SeriesGroupBy")
    elif reduction_func == "corrwith":
        msg = "GH#32293: attempts to call SeriesGroupBy.corrwith"
        request.applymarker(pytest.mark.xfail(reason=msg))
    elif (
        reduction_func == "nunique"
        and not test_series
        and len(keys) != 1
        and not observed
        and not as_index
    ):
        msg = "GH#52848 - raises a ValueError"
        request.applymarker(pytest.mark.xfail(reason=msg))

    df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
    df = df.astype({"a1": "category", "a2": "category"})
    if "a2" not in keys:
        df = df.drop(columns="a2")
    gb = df.groupby(by=keys, as_index=as_index, observed=observed)
    if test_series:
        gb = gb["b"]
    args = get_groupby_method_args(reduction_func, df)

    if not observed and reduction_func in ["idxmin", "idxmax"] and keys == ["a1", "a2"]:
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            gb.agg([reduction_func], *args)
        return

    result = gb.agg([reduction_func], *args)
    expected = getattr(gb, reduction_func)(*args)

    if as_index and (test_series or reduction_func == "size"):
        expected = expected.to_frame(reduction_func)
    if not test_series:
        expected.columns = MultiIndex.from_tuples(
            [(ind, "") for ind in expected.columns[:-1]] + [("b", reduction_func)]
        )
    elif not as_index:
        expected.columns = keys + [reduction_func]

    tm.assert_equal(result, expected)
from itertools import product
from string import ascii_lowercase

import numpy as np
import pytest

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Period,
    Series,
    Timedelta,
    Timestamp,
    date_range,
)
import pandas._testing as tm


class TestCounting:
    """Tests for GroupBy.cumcount, GroupBy.ngroup and GroupBy.count."""

    def test_cumcount(self):
        # cumcount numbers each row within its group, in row order
        df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"])
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 1, 2, 0, 3])

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())

    def test_cumcount_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series(dtype=object).groupby(level=0)

        # edge case, as this is usually considered float
        e = Series(dtype="int64")

        tm.assert_series_equal(e, ge.cumcount())
        tm.assert_series_equal(e, se.cumcount())

    def test_cumcount_dupe_index(self):
        # duplicate index labels are preserved, not deduplicated
        df = DataFrame(
            [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
        )
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())

    def test_cumcount_mi(self):
        # MultiIndex on the rows passes through unchanged
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi)
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 1, 2, 0, 3], index=mi)

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())

    def test_cumcount_groupby_not_col(self):
        # grouping by an external array rather than a column
        df = DataFrame(
            [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
        )
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A

        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())

    def test_ngroup(self):
        # ngroup labels each row with its group's ordinal
        df = DataFrame({"A": list("aaaba")})
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 0, 0, 1, 0])

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())

    def test_ngroup_distinct(self):
        # every row its own group -> 0..n-1
        df = DataFrame({"A": list("abcde")})
        g = df.groupby("A")
        sg = g.A

        expected = Series(range(5), dtype="int64")

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())

    def test_ngroup_one_group(self):
        df = DataFrame({"A": [0] * 5})
        g = df.groupby("A")
        sg = g.A

        expected = Series([0] * 5)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())

    def test_ngroup_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series(dtype=object).groupby(level=0)

        # edge case, as this is usually considered float
        e = Series(dtype="int64")

        tm.assert_series_equal(e, ge.ngroup())
        tm.assert_series_equal(e, se.ngroup())

    def test_ngroup_series_matches_frame(self):
        df = DataFrame({"A": list("aaaba")})
        s = Series(list("aaaba"))

        tm.assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup())

    def test_ngroup_dupe_index(self):
        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())

    def test_ngroup_mi(self):
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame({"A": list("aaaba")}, index=mi)
        g = df.groupby("A")
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=mi)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())

    def test_ngroup_groupby_not_col(self):
        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A

        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())

    def test_ngroup_descending(self):
        # ngroup(ascending=False) numbers groups from the end
        df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
        g = df.groupby(["A"])

        ascending = Series([0, 0, 1, 0, 1])
        descending = Series([1, 1, 0, 1, 0])

        # descending is the mirror image: (ngroups - 1) - ascending
        tm.assert_series_equal(descending, (g.ngroups - 1) - ascending)
        tm.assert_series_equal(ascending, g.ngroup(ascending=True))
        tm.assert_series_equal(descending, g.ngroup(ascending=False))

    def test_ngroup_matches_cumcount(self):
        # verify one manually-worked out case works
        df = DataFrame(
            [["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
            columns=["A", "X"],
        )
        g = df.groupby(["A", "X"])
        g_ngroup = g.ngroup()
        g_cumcount = g.cumcount()
        expected_ngroup = Series([0, 1, 2, 0, 3])
        expected_cumcount = Series([0, 0, 0, 1, 0])

        tm.assert_series_equal(g_ngroup, expected_ngroup)
        tm.assert_series_equal(g_cumcount, expected_cumcount)

    def test_ngroup_cumcount_pair(self):
        # brute force comparison for all small series
        for p in product(range(3), repeat=4):
            df = DataFrame({"a": p})
            g = df.groupby(["a"])

            # reference implementations computed with plain Python
            order = sorted(set(p))
            ngroupd = [order.index(val) for val in p]
            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]

            tm.assert_series_equal(g.ngroup(), Series(ngroupd))
            tm.assert_series_equal(g.cumcount(), Series(cumcounted))

    def test_ngroup_respects_groupby_order(self, sort):
        # `sort` is a conftest fixture (True/False)
        df = DataFrame({"a": np.random.default_rng(2).choice(list("abcdef"), 100)})
        g = df.groupby("a", sort=sort)
        df["group_id"] = -1
        df["group_index"] = -1

        # derive expected labels by iterating the groupby in its own order
        for i, (_, group) in enumerate(g):
            df.loc[group.index, "group_id"] = i
            for j, ind in enumerate(group.index):
                df.loc[ind, "group_index"] = j

        tm.assert_series_equal(Series(df["group_id"].values), g.ngroup())
        tm.assert_series_equal(Series(df["group_index"].values), g.cumcount())

    @pytest.mark.parametrize(
        "datetimelike",
        [
            [Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
            [Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
            [Timestamp(f"2016-05-{i:02d} 20:09:25", tz="UTC") for i in range(1, 4)],
            [Timedelta(x, unit="h") for x in range(1, 4)],
            [Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
        ],
    )
    def test_count_with_datetimelike(self, datetimelike):
        # test for #13393, where DataframeGroupBy.count() fails
        # when counting a datetimelike column.

        df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike})
        res = df.groupby("x").count()
        expected = DataFrame({"y": [2, 1]}, index=["a", "b"])
        expected.index.name = "x"
        tm.assert_frame_equal(expected, res)

    def test_count_with_only_nans_in_first_group(self):
        # GH21956
        df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]})
        result = df.groupby(["A", "B"]).C.count()
        # all-NaN first key -> every row is dropped, result is empty
        mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"])
        expected = Series([], index=mi, dtype=np.int64, name="C")
        tm.assert_series_equal(result, expected, check_index_type=False)

    def test_count_groupby_column_with_nan_in_groupby_column(self):
        # https://github.com/pandas-dev/pandas/issues/32841
        df = DataFrame({"A": [1, 1, 1, 1, 1], "B": [5, 4, np.nan, 3, 0]})
        res = df.groupby(["B"]).count()
        expected = DataFrame(
            index=Index([0.0, 3.0, 4.0, 5.0], name="B"), data={"A": [1, 1, 1, 1]}
        )
        tm.assert_frame_equal(expected, res)

    def test_groupby_count_dateparseerror(self):
        dr = date_range(start="1/1/2012", freq="5min", periods=10)

        # BAD Example, datetimes first
        ser = Series(np.arange(10), index=[dr, np.arange(10)])
        grouped = ser.groupby(lambda x: x[1] % 2 == 0)
        result = grouped.count()

        # same data with index levels swapped must give the same counts
        ser = Series(np.arange(10), index=[np.arange(10), dr])
        grouped = ser.groupby(lambda x: x[0] % 2 == 0)
        expected = grouped.count()

        tm.assert_series_equal(result, expected)


def test_groupby_timedelta_cython_count():
    # count() works on timedelta64 columns via the cython path
    df = DataFrame(
        {"g": list("ab" * 2), "delta": np.arange(4).astype("timedelta64[ns]")}
    )
    expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delta")
    result = df.groupby("g").delta.count()
    tm.assert_series_equal(expected, result)


def test_count():
    # count() agrees with apply(DataFrame.count) on a large mixed-dtype frame
    n = 1 << 15
    dr = date_range("2015-08-30", periods=n // 10, freq="min")

    df = DataFrame(
        {
            "1st": np.random.default_rng(2).choice(list(ascii_lowercase), n),
            "2nd": np.random.default_rng(2).integers(0, 5, n),
            "3rd": np.random.default_rng(2).standard_normal(n).round(3),
            "4th": np.random.default_rng(2).integers(-10, 10, n),
            "5th": np.random.default_rng(2).choice(dr, n),
            "6th": np.random.default_rng(2).standard_normal(n).round(3),
            "7th": np.random.default_rng(2).standard_normal(n).round(3),
            "8th": np.random.default_rng(2).choice(dr, n)
            - np.random.default_rng(2).choice(dr, 1),
            "9th": np.random.default_rng(2).choice(list(ascii_lowercase), n),
        }
    )

    # sprinkle NaNs into every nullable column
    for col in df.columns.drop(["1st", "2nd", "4th"]):
        df.loc[np.random.default_rng(2).choice(n, n // 10), col] = np.nan

    df["9th"] = df["9th"].astype("category")

    for key in ["1st", "2nd", ["1st", "2nd"]]:
        left = df.groupby(key).count()
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
        tm.assert_frame_equal(left, right)


def test_count_non_nulls():
    # GH#5610
    # count counts non-nulls
    df = DataFrame(
        [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]],
        columns=["A", "B", "C"],
    )

    count_as = df.groupby("A").count()
    count_not_as = df.groupby("A", as_index=False).count()

    expected = DataFrame([[1, 2], [0, 0]], columns=["B", "C"], index=[1, 3])
    expected.index.name = "A"
    tm.assert_frame_equal(count_not_as, expected.reset_index())
    tm.assert_frame_equal(count_as, expected)

    count_B = df.groupby("A")["B"].count()
    tm.assert_series_equal(count_B, expected["B"])


def test_count_object():
    # count() on object-dtype columns, with and without NaNs
    df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
    result = df.groupby("c").a.count()
    expected = Series([3, 3], index=Index([2, 3], name="c"), name="a")
    tm.assert_series_equal(result, expected)

    df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
    result = df.groupby("c").a.count()
    expected = Series([1, 3], index=Index([2, 3], name="c"), name="a")
    tm.assert_series_equal(result, expected)


def test_count_cross_type():
    # GH8169
    # Set float64 dtype to avoid upcast when setting nan below
    vals = np.hstack(
        (
            np.random.default_rng(2).integers(0, 5, (100, 2)),
            np.random.default_rng(2).integers(0, 2, (100, 2)),
        )
    ).astype("float64")

    df = DataFrame(vals, columns=["a", "b", "c", "d"])
    df[df == 2] = np.nan
    expected = df.groupby(["c", "d"]).count()

    # counts are dtype-independent for the counted columns
    for t in ["float32", "object"]:
        df["a"] = df["a"].astype(t)
        df["b"] = df["b"].astype(t)
        result = df.groupby(["c", "d"]).count()
        tm.assert_frame_equal(result, expected)


def test_lower_int_prec_count():
    # count() works on narrow integer dtypes (int8/uint32/int16)
    df = DataFrame(
        {
            "a": np.array([0, 1, 2, 100], np.int8),
            "b": np.array([1, 2, 3, 6], np.uint32),
            "c": np.array([4, 5, 6, 8], np.int16),
            "grp": list("ab" * 2),
        }
    )
    result = df.groupby("grp").count()
    expected = DataFrame(
        {"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp")
    )
    tm.assert_frame_equal(result, expected)


def test_count_uses_size_on_exception():
    class RaisingObjectException(Exception):
        pass

    class RaisingObject:
        def __init__(self, msg="I will raise inside Cython") -> None:
            super().__init__()
            self.msg = msg

        def __eq__(self, other):
            # gets called in Cython to check that raising calls the method
            raise RaisingObjectException(self.msg)

    # count() must still succeed when the cython path raises on the objects
    df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)})
    result = df.groupby("grp").count()
    expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp"))
    tm.assert_frame_equal(result, expected)


def test_count_arrow_string_array(any_string_dtype):
    # GH#54751
    pytest.importorskip("pyarrow")
    df = DataFrame(
        {"a": [1, 2, 3], "b": Series(["a", "b", "a"], dtype=any_string_dtype)}
    )
    result = df.groupby("a").count()
    expected = DataFrame({"b": 1}, index=Index([1, 2, 3], name="a"))
    tm.assert_frame_equal(result, expected)
import numpy as np
import pytest

from pandas.errors import UnsupportedFunctionCall
import pandas.util._test_decorators as td

import pandas as pd
from pandas import (
    DataFrame,
    Series,
)
import pandas._testing as tm


@pytest.fixture(
    params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"],
    ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"],
)
def dtypes_for_minmax(request):
    """
    Fixture of dtypes with min and max values used for testing
    cummin and cummax
    """
    dtype = request.param

    # map nullable pandas dtypes to their numpy backing type for limit lookup
    np_type = dtype
    if dtype == "Int64":
        np_type = np.int64
    elif dtype == "Float64":
        np_type = np.float64

    min_val = (
        np.iinfo(np_type).min
        if np.dtype(np_type).kind == "i"
        else np.finfo(np_type).min
    )
    max_val = (
        np.iinfo(np_type).max
        if np.dtype(np_type).kind == "i"
        else np.finfo(np_type).max
    )

    # (dtype requested, smallest representable value, largest representable value)
    return (dtype, min_val, max_val)


def test_groupby_cumprod():
    # GH 4095
    df = DataFrame({"key": ["b"] * 10, "value": 2})

    # cython cumprod must match the apply-based reference
    actual = df.groupby("key")["value"].cumprod()
    expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)

    # 100 elements of 2**k would overflow int64, so use float here
    df = DataFrame({"key": ["b"] * 100, "value": 2})
    df["value"] = df["value"].astype(float)
    actual = df.groupby("key")["value"].cumprod()
    expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)


@pytest.mark.skip_ubsan
def test_groupby_cumprod_overflow():
    # GH#37493 if we overflow we return garbage consistent with numpy
    df = DataFrame({"key": ["b"] * 4, "value": 100_000})
    actual = df.groupby("key")["value"].cumprod()
    # last value is the int64-wrapped product, matching numpy's behavior
    expected = Series(
        [100_000, 10_000_000_000, 1_000_000_000_000_000, 7766279631452241920],
        name="value",
    )
    tm.assert_series_equal(actual, expected)

    numpy_result = df.groupby("key", group_keys=False)["value"].apply(
        lambda x: x.cumprod()
    )
    numpy_result.name = "value"
    tm.assert_series_equal(actual, numpy_result)


def test_groupby_cumprod_nan_influences_other_columns():
    # GH#48064
    df = DataFrame(
        {
            "a": 1,
            "b": [1, np.nan, 2],
            "c": [1, 2, 3.0],
        }
    )
    # with skipna=False the NaN poisons column "b" onward, but "c" is unaffected
    result = df.groupby("a").cumprod(numeric_only=True, skipna=False)
    expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]})
    tm.assert_frame_equal(result, expected)


def test_cummin(dtypes_for_minmax):
    dtype = dtypes_for_minmax[0]
    min_val = dtypes_for_minmax[1]

    # GH 15048
    base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
    expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]

    df = base_df.astype(dtype)

    expected = DataFrame({"B": expected_mins}).astype(dtype)
    result = df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected)

    # Test w/ min value for dtype
    df.loc[[2, 6], "B"] = min_val
    df.loc[[1, 5], "B"] = min_val + 1
    expected.loc[[2, 3, 6, 7], "B"] = min_val
    expected.loc[[1, 5], "B"] = min_val + 1  # should not be rounded to min_val
    result = df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected, check_exact=True)
    expected = (
        df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
    )
    tm.assert_frame_equal(result, expected, check_exact=True)

    # Test nan in some values
    # Explicit cast to float to avoid implicit cast when setting nan
    base_df = base_df.astype({"B": "float"})
    base_df.loc[[0, 2, 4, 6], "B"] = np.nan
    expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
    result = base_df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    expected = (
        base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
    )
    tm.assert_frame_equal(result, expected)

    # GH 15561
    df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
    expected = Series(pd.to_datetime("2001"), index=[0], name="b")

    result = df.groupby("a")["b"].cummin()
    tm.assert_series_equal(expected, result)

    # GH 15635
    df = DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]})
    result = df.groupby("a").b.cummin()
    expected = Series([1, 2, 1], name="b")
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("method", ["cummin", "cummax"])
@pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"])
def test_cummin_max_all_nan_column(method, dtype):
    # an all-NA column must stay all-NA (and keep its dtype)
    base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
    base_df["B"] = base_df["B"].astype(dtype)
    grouped = base_df.groupby("A")

    expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype)
    result = getattr(grouped, method)()
    tm.assert_frame_equal(expected, result)

    result = getattr(grouped["B"], method)().to_frame()
    tm.assert_frame_equal(expected, result)


def test_cummax(dtypes_for_minmax):
    dtype = dtypes_for_minmax[0]
    max_val = dtypes_for_minmax[2]

    # GH 15048
    base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
    expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]

    df = base_df.astype(dtype)

    expected = DataFrame({"B": expected_maxs}).astype(dtype)
    result = df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)

    # Test w/ max value for dtype
    df.loc[[2, 6], "B"] = max_val
    expected.loc[[2, 3, 6, 7], "B"] = max_val
    result = df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    expected = (
        df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
    )
    tm.assert_frame_equal(result, expected)

    # Test nan in some values
    # Explicit cast to float to avoid implicit cast when setting nan
    base_df = base_df.astype({"B": "float"})
    base_df.loc[[0, 2, 4, 6], "B"] = np.nan
    expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
    result = base_df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    expected = (
        base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
    )
    tm.assert_frame_equal(result, expected)

    # GH 15561
    df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
    expected = Series(pd.to_datetime("2001"), index=[0], name="b")

    result = df.groupby("a")["b"].cummax()
    tm.assert_series_equal(expected, result)

    # GH 15635
    df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]})
    result = df.groupby("a").b.cummax()
    expected = Series([2, 1, 2], name="b")
    tm.assert_series_equal(result, expected)


def test_cummax_i8_at_implementation_bound():
    # the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT
    # for int64 dtype GH#46382
    ser = Series([pd.NaT._value + n for n in range(5)])
    df = DataFrame({"A": 1, "B": ser, "C": ser._values.view("M8[ns]")})
    gb = df.groupby("A")

    res = gb.cummax()
    exp = df[["B", "C"]]
    tm.assert_frame_equal(res, exp)


@pytest.mark.parametrize("method", ["cummin", "cummax"])
@pytest.mark.parametrize("dtype", ["float", "Int64", "Float64"])
@pytest.mark.parametrize(
    "groups,expected_data",
    [
        ([1, 1, 1], [1, None, None]),
        ([1, 2, 3], [1, None, 2]),
        ([1, 3, 3], [1, None, None]),
    ],
)
def test_cummin_max_skipna(method, dtype, groups, expected_data):
    # GH-34047
    df = DataFrame({"a": Series([1, None, 2], dtype=dtype)})
    orig = df.copy()
    gb = df.groupby(groups)["a"]

    result = getattr(gb, method)(skipna=False)
    expected = Series(expected_data, dtype=dtype, name="a")

    # check we didn't accidentally alter df
    tm.assert_frame_equal(df, orig)

    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("method", ["cummin", "cummax"])
def test_cummin_max_skipna_multiple_cols(method):
    # Ensure missing value in "a" doesn't cause "b" to be nan-filled
    df = DataFrame({"a": [np.nan, 2.0, 2.0], "b": [2.0, 2.0, 2.0]})
    gb = df.groupby([1, 1, 1])[["a", "b"]]

    result = getattr(gb, method)(skipna=False)
    expected = DataFrame({"a": [np.nan, np.nan, np.nan], "b": [2.0, 2.0, 2.0]})

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
    # see gh-12811
    df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
    g = df.groupby("A")

    msg = "numpy operations are not valid with groupby"

    # numpy-style positional/keyword args must be rejected
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        getattr(g, func)(1, 2, 3)
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        getattr(g, func)(foo=1)


@td.skip_if_32bit
@pytest.mark.parametrize("method", ["cummin", "cummax"])
@pytest.mark.parametrize(
    "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)]
)
def test_nullable_int_not_cast_as_float(method, dtype, val):
    # values above 2**53 are not exactly representable as float64;
    # the nullable integer dtype must round-trip unchanged
    data = [val, pd.NA]
    df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype)
    grouped = df.groupby("grp")

    result = grouped.transform(method)
    expected = DataFrame({"b": data}, dtype=dtype)

    tm.assert_frame_equal(result, expected)


def test_cython_api2():
    # this takes the fast apply path

    # cumsum (GH5614)
    df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
    expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
    result = df.groupby("A").cumsum()
    tm.assert_frame_equal(result, expected)

    # GH 5755 - cumsum is a transformer and should ignore as_index
    result = df.groupby("A", as_index=False).cumsum()
    tm.assert_frame_equal(result, expected)

    # GH 13994
    msg = "DataFrameGroupBy.cumsum with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("A").cumsum(axis=1)
    expected = df.cumsum(axis=1)
    tm.assert_frame_equal(result, expected)

    msg = "DataFrameGroupBy.cumprod with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("A").cumprod(axis=1)
    expected = df.cumprod(axis=1)
    tm.assert_frame_equal(result, expected)
from string import ascii_lowercase

import numpy as np
import pytest

import pandas as pd
from pandas import (
    DataFrame,
    Series,
    Timestamp,
)
import pandas._testing as tm


def test_filter_series():
    # filter keeps whole groups whose predicate is True, preserving row order
    s = Series([1, 3, 20, 5, 22, 24, 7])
    expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6])
    expected_even = Series([20, 22, 24], index=[2, 4, 5])
    grouper = s.apply(lambda x: x % 2)
    grouped = s.groupby(grouper)
    tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
    tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
    # Test dropna=False.
    tm.assert_series_equal(
        grouped.filter(lambda x: x.mean() < 10, dropna=False),
        expected_odd.reindex(s.index),
    )
    tm.assert_series_equal(
        grouped.filter(lambda x: x.mean() > 10, dropna=False),
        expected_even.reindex(s.index),
    )


def test_filter_single_column_df():
    df = DataFrame([1, 3, 20, 5, 22, 24, 7])
    expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
    expected_even = DataFrame([20, 22, 24], index=[2, 4, 5])
    grouper = df[0].apply(lambda x: x % 2)
    grouped = df.groupby(grouper)
    tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
    tm.assert_frame_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
    # Test dropna=False.
    tm.assert_frame_equal(
        grouped.filter(lambda x: x.mean() < 10, dropna=False),
        expected_odd.reindex(df.index),
    )
    tm.assert_frame_equal(
        grouped.filter(lambda x: x.mean() > 10, dropna=False),
        expected_even.reindex(df.index),
    )


def test_filter_multi_column_df():
    # the predicate may combine several columns of the group
    df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]})
    grouper = df["A"].apply(lambda x: x % 2)
    grouped = df.groupby(grouper)
    expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2])
    tm.assert_frame_equal(
        grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected
    )


def test_filter_mixed_df():
    df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
    grouper = df["A"].apply(lambda x: x % 2)
    grouped = df.groupby(grouper)
    expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2])
    tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected)


def test_filter_out_all_groups():
    # an always-False predicate yields an empty (but typed) result
    s = Series([1, 3, 20, 5, 22, 24, 7])
    grouper = s.apply(lambda x: x % 2)
    grouped = s.groupby(grouper)
    tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
    df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
    grouper = df["A"].apply(lambda x: x % 2)
    grouped = df.groupby(grouper)
    tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]])


def test_filter_out_no_groups():
    # an always-True predicate returns the input unchanged
    s = Series([1, 3, 20, 5, 22, 24, 7])
    grouper = s.apply(lambda x: x % 2)
    grouped = s.groupby(grouper)
    filtered = grouped.filter(lambda x: x.mean() > 0)
    tm.assert_series_equal(filtered, s)
    df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
    grouper = df["A"].apply(lambda x: x % 2)
    grouped = df.groupby(grouper)
    filtered = grouped.filter(lambda x: x["A"].mean() > 0)
    tm.assert_frame_equal(filtered, df)


def test_filter_out_all_groups_in_df():
    # GH12768
    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
    res = df.groupby("a")
    res = res.filter(lambda x: x["b"].sum() > 5, dropna=False)
    # dropna=False keeps the shape, filling filtered-out rows with NaN
    expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3})
    tm.assert_frame_equal(expected, res)

    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
    res = df.groupby("a")
    res = res.filter(lambda x: x["b"].sum() > 5, dropna=True)
    expected = DataFrame({"a": [], "b": []}, dtype="int64")
    tm.assert_frame_equal(expected, res)


def test_filter_condition_raises():
    # an exception raised inside the predicate propagates as-is
    def raise_if_sum_is_zero(x):
        if x.sum() == 0:
            raise ValueError
        return x.sum() > 0

    s = Series([-1, 0, 1, 2])
    grouper = s.apply(lambda x: x % 2)
    grouped = s.groupby(grouper)
    msg = "the filter must return a boolean result"
    with pytest.raises(TypeError, match=msg):
        grouped.filter(raise_if_sum_is_zero)


def test_filter_with_axis_in_groupby():
    # issue 11041
    index = pd.MultiIndex.from_product([range(10), [0, 1]])
    data = DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64")

    # axis=1 grouping is deprecated but must still filter columns correctly
    msg = "DataFrame.groupby with axis=1"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = data.groupby(level=0, axis=1)
    result = gb.filter(lambda x: x.iloc[0, 0] > 10)
    expected = data.iloc[:, 12:20]
    tm.assert_frame_equal(result, expected)


def test_filter_bad_shapes():
    # non-scalar predicate results raise informative TypeErrors
    df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
    s = df["B"]
    g_df = df.groupby("B")
    g_s = s.groupby(s)

    f = lambda x: x
    msg = "filter function returned a DataFrame, but expected a scalar bool"
    with pytest.raises(TypeError, match=msg):
        g_df.filter(f)
    msg = "the filter must return a boolean result"
    with pytest.raises(TypeError, match=msg):
        g_s.filter(f)

    f = lambda x: x == 1
    msg = "filter function returned a DataFrame, but expected a scalar bool"
    with pytest.raises(TypeError, match=msg):
        g_df.filter(f)
    msg = "the filter must return a boolean result"
    with pytest.raises(TypeError, match=msg):
        g_s.filter(f)

    f = lambda x: np.outer(x, x)
    msg = "can't multiply sequence by non-int of type 'str'"
    with pytest.raises(TypeError, match=msg):
        g_df.filter(f)
    msg = "the filter must return a boolean result"
    with pytest.raises(TypeError, match=msg):
        g_s.filter(f)


def test_filter_nan_is_false():
    # a NaN predicate result counts as False (group dropped)
    df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
    s = df["B"]
    g_df = df.groupby(df["B"])
    g_s = s.groupby(s)

    f = lambda x: np.nan
    tm.assert_frame_equal(g_df.filter(f), df.loc[[]])
    tm.assert_series_equal(g_s.filter(f), s[[]])


def test_filter_pdna_is_false():
    # in particular, dont raise in filter trying to call bool(pd.NA)
    df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
    ser = df["B"]
    g_df = df.groupby(df["B"])
    g_s = ser.groupby(ser)

    func = lambda x: pd.NA
    res = g_df.filter(func)
    tm.assert_frame_equal(res, df.loc[[]])
    res = g_s.filter(func)
    tm.assert_series_equal(res, ser[[]])


def test_filter_against_workaround_ints():
    # Series of ints
    # filter() must agree with the transform-then-boolean-mask workaround
    s = Series(np.random.default_rng(2).integers(0, 100, 100))
    grouper = s.apply(lambda x: np.round(x, -1))
    grouped = s.groupby(grouper)
    f = lambda x: x.mean() > 10

    old_way = s[grouped.transform(f).astype("bool")]
    new_way = grouped.filter(f)
    tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())


def test_filter_against_workaround_floats():
    # Series of floats
    s = 100 * Series(np.random.default_rng(2).random(100))
    grouper = s.apply(lambda x: np.round(x, -1))
    grouped = s.groupby(grouper)
    f = lambda x: x.mean() > 10
    old_way = s[grouped.transform(f).astype("bool")]
    new_way = grouped.filter(f)
    tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())


def test_filter_against_workaround_dataframe():
    # Set up DataFrame of ints, floats, strings.
    letters = np.array(list(ascii_lowercase))
    N = 100
    random_letters = letters.take(
        np.random.default_rng(2).integers(0, 26, N, dtype=int)
    )
    df = DataFrame(
        {
            "ints": Series(np.random.default_rng(2).integers(0, 100, N)),
            "floats": N / 10 * Series(np.random.default_rng(2).random(N)),
            "letters": Series(random_letters),
        }
    )

    # Group by ints; filter on floats.
    grouped = df.groupby("ints")
    old_way = df[grouped.floats.transform(lambda x: x.mean() > N / 20).astype("bool")]
    new_way = grouped.filter(lambda x: x["floats"].mean() > N / 20)
    tm.assert_frame_equal(new_way, old_way)

    # Group by floats (rounded); filter on strings.
    grouper = df.floats.apply(lambda x: np.round(x, -1))
    grouped = df.groupby(grouper)
    old_way = df[grouped.letters.transform(lambda x: len(x) < N / 10).astype("bool")]
    new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
    tm.assert_frame_equal(new_way, old_way)

    # Group by strings; filter on ints.
    grouped = df.groupby("letters")
    old_way = df[grouped.ints.transform(lambda x: x.mean() > N / 20).astype("bool")]
    new_way = grouped.filter(lambda x: x["ints"].mean() > N / 20)
    tm.assert_frame_equal(new_way, old_way)


def test_filter_using_len():
    # BUG GH4447
    df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
    grouped = df.groupby("B")
    actual = grouped.filter(lambda x: len(x) > 2)
    expected = DataFrame(
        {"A": np.arange(2, 6), "B": list("bbbb"), "C": np.arange(2, 6)},
        index=np.arange(2, 6, dtype=np.int64),
    )
    tm.assert_frame_equal(actual, expected)

    actual = grouped.filter(lambda x: len(x) > 4)
    expected = df.loc[[]]
    tm.assert_frame_equal(actual, expected)

    # Series have always worked properly, but we'll test anyway.
    s = df["B"]
    grouped = s.groupby(s)
    actual = grouped.filter(lambda x: len(x) > 2)
    expected = Series(4 * ["b"], index=np.arange(2, 6, dtype=np.int64), name="B")
    tm.assert_series_equal(actual, expected)

    actual = grouped.filter(lambda x: len(x) > 4)
    expected = s[[]]
    tm.assert_series_equal(actual, expected)


def test_filter_maintains_ordering():
    # Simple case: index is sequential. #4621
    df = DataFrame(
        {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}
    )
    s = df["pid"]
    grouped = df.groupby("tag")
    actual = grouped.filter(lambda x: len(x) > 1)
    expected = df.iloc[[1, 2, 4, 7]]
    tm.assert_frame_equal(actual, expected)

    grouped = s.groupby(df["tag"])
    actual = grouped.filter(lambda x: len(x) > 1)
    expected = s.iloc[[1, 2, 4, 7]]
    tm.assert_series_equal(actual, expected)

    # Now index is sequentially decreasing.
    df.index = np.arange(len(df) - 1, -1, -1)
    s = df["pid"]
    grouped = df.groupby("tag")
    actual = grouped.filter(lambda x: len(x) > 1)
    expected = df.iloc[[1, 2, 4, 7]]
    tm.assert_frame_equal(actual, expected)

    grouped = s.groupby(df["tag"])
    actual = grouped.filter(lambda x: len(x) > 1)
    expected = s.iloc[[1, 2, 4, 7]]
    tm.assert_series_equal(actual, expected)

    # Index is shuffled.
    SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
    df.index = df.index[SHUFFLED]
    s = df["pid"]
    grouped = df.groupby("tag")
    actual = grouped.filter(lambda x: len(x) > 1)
    expected = df.iloc[[1, 2, 4, 7]]
    tm.assert_frame_equal(actual, expected)

    grouped = s.groupby(df["tag"])
    actual = grouped.filter(lambda x: len(x) > 1)
    expected = s.iloc[[1, 2, 4, 7]]
    tm.assert_series_equal(actual, expected)


def test_filter_multiple_timestamp():
    # GH 10114
    df = DataFrame(
        {
            "A": np.arange(5, dtype="int64"),
            "B": ["foo", "bar", "foo", "bar", "bar"],
            "C": Timestamp("20130101"),
        }
    )

    grouped = df.groupby(["B", "C"])

    result = grouped["A"].filter(lambda x: True)
    tm.assert_series_equal(df["A"], result)

    result = grouped["A"].transform(len)
    expected = Series([2, 3, 2, 3, 3], name="A")
    tm.assert_series_equal(result, expected)

    result = grouped.filter(lambda x: True)
    tm.assert_frame_equal(df, result)

    result = grouped.transform("sum")
    expected = DataFrame({"A": [2, 8, 2, 8, 8]})
    tm.assert_frame_equal(result, expected)

    result = grouped.transform(len)
    expected = DataFrame({"A": [2, 3, 2, 3, 3]})
    tm.assert_frame_equal(result, expected)


def test_filter_and_transform_with_non_unique_int_index():
    # GH4620
    index = [1, 1, 1, 2, 1, 1, 0, 1]
    df = DataFrame(
        {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
        index=index,
    )
    grouped_df = df.groupby("tag")
    ser = df["pid"]
    grouped_ser = ser.groupby(df["tag"])
    expected_indexes = [1, 2, 4, 7]

    # Filter DataFrame
    actual = grouped_df.filter(lambda x: len(x) > 1)
    expected = df.iloc[expected_indexes]
    tm.assert_frame_equal(actual, expected)

    actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
    # Cast to avoid upcast when setting nan below
    expected = df.copy().astype("float64")
    expected.iloc[[0, 3, 5, 6]] = np.nan
    tm.assert_frame_equal(actual, expected)

    # Filter Series
    actual = grouped_ser.filter(lambda x: len(x) > 1)
    expected = ser.take(expected_indexes)
    tm.assert_series_equal(actual, expected)

    actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
    expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
    # ^ made manually because this can get confusing!
    tm.assert_series_equal(actual, expected)

    # Transform Series
    actual = grouped_ser.transform(len)
    expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
    tm.assert_series_equal(actual, expected)

    # Transform (a column from) DataFrameGroupBy
    actual = grouped_df.pid.transform(len)
    tm.assert_series_equal(actual, expected)


def test_filter_and_transform_with_multiple_non_unique_int_index():
    # GH4620
    index = [1, 1, 1, 2, 0, 0, 0, 1]
    df = DataFrame(
        {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
        index=index,
    )
    grouped_df = df.groupby("tag")
    ser = df["pid"]
    grouped_ser = ser.groupby(df["tag"])
    expected_indexes = [1, 2, 4, 7]

    # Filter DataFrame
    actual = grouped_df.filter(lambda x: len(x) > 1)
    expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n # Cast to avoid upcast when setting nan below\n expected = df.copy().astype("float64")\n expected.iloc[[0, 3, 5, 6]] = np.nan\n tm.assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n tm.assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")\n # ^ made manually because this can get confusing!\n tm.assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")\n tm.assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n tm.assert_series_equal(actual, expected)\n\n\ndef test_filter_and_transform_with_non_unique_float_index():\n # GH4620\n index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)\n df = DataFrame(\n {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},\n index=index,\n )\n grouped_df = df.groupby("tag")\n ser = df["pid"]\n grouped_ser = ser.groupby(df["tag"])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n tm.assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n # Cast to avoid upcast when setting nan below\n expected = df.copy().astype("float64")\n expected.iloc[[0, 3, 5, 6]] = np.nan\n tm.assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n tm.assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n expected = Series([np.nan, 
1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")\n # ^ made manually because this can get confusing!\n tm.assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")\n tm.assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n tm.assert_series_equal(actual, expected)\n\n\ndef test_filter_and_transform_with_non_unique_timestamp_index():\n # GH4620\n t0 = Timestamp("2013-09-30 00:05:00")\n t1 = Timestamp("2013-10-30 00:05:00")\n t2 = Timestamp("2013-11-30 00:05:00")\n index = [t1, t1, t1, t2, t1, t1, t0, t1]\n df = DataFrame(\n {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},\n index=index,\n )\n grouped_df = df.groupby("tag")\n ser = df["pid"]\n grouped_ser = ser.groupby(df["tag"])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n tm.assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n # Cast to avoid upcast when setting nan below\n expected = df.copy().astype("float64")\n expected.iloc[[0, 3, 5, 6]] = np.nan\n tm.assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n tm.assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")\n # ^ made manually because this can get confusing!\n tm.assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")\n tm.assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n 
tm.assert_series_equal(actual, expected)\n\n\ndef test_filter_and_transform_with_non_unique_string_index():\n # GH4620\n index = list("bbbcbbab")\n df = DataFrame(\n {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},\n index=index,\n )\n grouped_df = df.groupby("tag")\n ser = df["pid"]\n grouped_ser = ser.groupby(df["tag"])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n tm.assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n # Cast to avoid upcast when setting nan below\n expected = df.copy().astype("float64")\n expected.iloc[[0, 3, 5, 6]] = np.nan\n tm.assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n tm.assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")\n # ^ made manually because this can get confusing!\n tm.assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")\n tm.assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n tm.assert_series_equal(actual, expected)\n\n\ndef test_filter_has_access_to_grouped_cols():\n df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=["A", "B"])\n g = df.groupby("A")\n # previously didn't have access to col A #????\n filt = g.filter(lambda x: x["A"].sum() == 2)\n tm.assert_frame_equal(filt, df.iloc[[0, 1]])\n\n\ndef test_filter_enforces_scalarness():\n df = DataFrame(\n [\n ["best", "a", "x"],\n ["worst", "b", "y"],\n ["best", "c", "x"],\n ["best", "d", "y"],\n ["worst", "d", "y"],\n ["worst", "d", "y"],\n ["best", "d", "z"],\n ],\n 
columns=["a", "b", "c"],\n )\n with pytest.raises(TypeError, match="filter function returned a.*"):\n df.groupby("c").filter(lambda g: g["a"] == "best")\n\n\ndef test_filter_non_bool_raises():\n df = DataFrame(\n [\n ["best", "a", 1],\n ["worst", "b", 1],\n ["best", "c", 1],\n ["best", "d", 1],\n ["worst", "d", 1],\n ["worst", "d", 1],\n ["best", "d", 1],\n ],\n columns=["a", "b", "c"],\n )\n with pytest.raises(TypeError, match="filter function returned a.*"):\n df.groupby("a").filter(lambda g: g.c.mean())\n\n\ndef test_filter_dropna_with_empty_groups():\n # GH 10780\n data = Series(np.random.default_rng(2).random(9), index=np.repeat([1, 2, 3], 3))\n grouped = data.groupby(level=0)\n result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False)\n expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))\n tm.assert_series_equal(result_false, expected_false)\n\n result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True)\n expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64)\n tm.assert_series_equal(result_true, expected_true)\n\n\ndef test_filter_consistent_result_before_after_agg_func():\n # GH 17091\n df = DataFrame({"data": range(6), "key": list("ABCABC")})\n grouper = df.groupby("key")\n result = grouper.filter(lambda x: True)\n expected = DataFrame({"data": range(6), "key": list("ABCABC")})\n tm.assert_frame_equal(result, expected)\n\n grouper.sum()\n result = grouper.filter(lambda x: True)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_filters.py | test_filters.py | Python | 21,870 | 0.95 | 0.053459 | 0.105769 | react-lib | 404 | 2024-11-24T00:20:00.576077 | Apache-2.0 | true | 586a30b9eeb517d7cf60c2c9a4537d7d |
# Tests for the `dropna` keyword of groupby: NA group labels are either
# excluded (dropna=True, the default) or kept as their own group (GH 3729).
import numpy as np
import pytest

from pandas.compat.pyarrow import pa_version_under10p1

from pandas.core.dtypes.missing import na_value_for_dtype

import pandas as pd
import pandas._testing as tm
from pandas.tests.groupby import get_groupby_method_args


@pytest.mark.parametrize(
    "dropna, tuples, outputs",
    [
        (
            True,
            [["A", "B"], ["B", "A"]],
            {"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]},
        ),
        (
            False,
            [["A", "B"], ["A", np.nan], ["B", "A"]],
            {
                "c": [13.0, 12.3, 123.23],
                "d": [13.0, 233.0, 123.0],
                "e": [13.0, 12.0, 1.0],
            },
        ),
    ],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
    dropna, tuples, outputs, nulls_fixture
):
    # GH 3729 this is to test that NA is in one group
    # nulls_fixture: conftest fixture — presumably parametrizes over pandas'
    # null representations (np.nan, None, pd.NA, ...); verify in conftest.
    df_list = [
        ["A", "B", 12, 12, 12],
        ["A", nulls_fixture, 12.3, 233.0, 12],
        ["B", "A", 123.23, 123, 1],
        ["A", "B", 1, 1, 1.0],
    ]
    df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
    grouped = df.groupby(["a", "b"], dropna=dropna).sum()

    mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))

    # Since right now, by default MI will drop NA from levels when we create MI
    # via `from_*`, so we need to add NA for level manually afterwards.
    if not dropna:
        mi = mi.set_levels(["A", "B", np.nan], level="b")
    expected = pd.DataFrame(outputs, index=mi)

    tm.assert_frame_equal(grouped, expected)


@pytest.mark.parametrize(
    "dropna, tuples, outputs",
    [
        (
            True,
            [["A", "B"], ["B", "A"]],
            {"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]},
        ),
        (
            False,
            [["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]],
            {
                "c": [12.0, 13.3, 123.23, 1.0],
                "d": [12.0, 234.0, 123.0, 1.0],
                "e": [12.0, 13.0, 1.0, 1.0],
            },
        ),
    ],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
    dropna, tuples, outputs, nulls_fixture, nulls_fixture2
):
    # GH 3729 this is to test that NA in different groups with different representations
    df_list = [
        ["A", "B", 12, 
12, 12],\n ["A", nulls_fixture, 12.3, 233.0, 12],\n ["B", "A", 123.23, 123, 1],\n [nulls_fixture2, "B", 1, 1, 1.0],\n ["A", nulls_fixture2, 1, 1, 1.0],\n ]\n df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])\n grouped = df.groupby(["a", "b"], dropna=dropna).sum()\n\n mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))\n\n # Since right now, by default MI will drop NA from levels when we create MI\n # via `from_*`, so we need to add NA for level manually afterwards.\n if not dropna:\n mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])\n expected = pd.DataFrame(outputs, index=mi)\n\n tm.assert_frame_equal(grouped, expected)\n\n\n@pytest.mark.parametrize(\n "dropna, idx, outputs",\n [\n (True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}),\n (\n False,\n ["A", "B", np.nan],\n {\n "b": [123.23, 13.0, 12.3],\n "c": [123.0, 13.0, 233.0],\n "d": [1.0, 13.0, 12.0],\n },\n ),\n ],\n)\ndef test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs):\n # GH 3729\n df_list = [\n ["B", 12, 12, 12],\n [None, 12.3, 233.0, 12],\n ["A", 123.23, 123, 1],\n ["B", 1, 1, 1.0],\n ]\n df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"])\n grouped = df.groupby("a", dropna=dropna).sum()\n\n expected = pd.DataFrame(outputs, index=pd.Index(idx, name="a"))\n\n tm.assert_frame_equal(grouped, expected)\n\n\n@pytest.mark.parametrize(\n "dropna, idx, expected",\n [\n (True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])),\n (\n False,\n ["a", "a", "b", np.nan],\n pd.Series([3, 3, 3], index=["a", "b", np.nan]),\n ),\n ],\n)\ndef test_groupby_dropna_series_level(dropna, idx, expected):\n ser = pd.Series([1, 2, 3, 3], index=idx)\n\n result = ser.groupby(level=0, dropna=dropna).sum()\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dropna, expected",\n [\n (True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")),\n (\n False,\n pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], 
name="Max Speed"),\n ),\n ],\n)\ndef test_groupby_dropna_series_by(dropna, expected):\n ser = pd.Series(\n [390.0, 350.0, 30.0, 20.0],\n index=["Falcon", "Falcon", "Parrot", "Parrot"],\n name="Max Speed",\n )\n\n result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean()\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("dropna", (False, True))\ndef test_grouper_dropna_propagation(dropna):\n # GH 36604\n df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]})\n gb = df.groupby("A", dropna=dropna)\n assert gb._grouper.dropna == dropna\n\n\n@pytest.mark.parametrize(\n "index",\n [\n pd.RangeIndex(0, 4),\n list("abcd"),\n pd.MultiIndex.from_product([(1, 2), ("R", "B")], names=["num", "col"]),\n ],\n)\ndef test_groupby_dataframe_slice_then_transform(dropna, index):\n # GH35014 & GH35612\n expected_data = {"B": [2, 2, 1, np.nan if dropna else 1]}\n\n df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}, index=index)\n gb = df.groupby("A", dropna=dropna)\n\n result = gb.transform(len)\n expected = pd.DataFrame(expected_data, index=index)\n tm.assert_frame_equal(result, expected)\n\n result = gb[["B"]].transform(len)\n expected = pd.DataFrame(expected_data, index=index)\n tm.assert_frame_equal(result, expected)\n\n result = gb["B"].transform(len)\n expected = pd.Series(expected_data["B"], index=index, name="B")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dropna, tuples, outputs",\n [\n (\n True,\n [["A", "B"], ["B", "A"]],\n {"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]},\n ),\n (\n False,\n [["A", "B"], ["A", np.nan], ["B", "A"]],\n {\n "c": [13.0, 12.3, 123.23],\n "d": [12.0, 233.0, 123.0],\n "e": [1.0, 12.0, 1.0],\n },\n ),\n ],\n)\ndef test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs):\n # GH 3729\n df_list = [\n ["A", "B", 12, 12, 12],\n ["A", None, 12.3, 233.0, 12],\n ["B", "A", 123.23, 123, 1],\n ["A", "B", 1, 1, 1.0],\n ]\n df = pd.DataFrame(df_list, 
columns=["a", "b", "c", "d", "e"])\n agg_dict = {"c": "sum", "d": "max", "e": "min"}\n grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict)\n\n mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))\n\n # Since right now, by default MI will drop NA from levels when we create MI\n # via `from_*`, so we need to add NA for level manually afterwards.\n if not dropna:\n mi = mi.set_levels(["A", "B", np.nan], level="b")\n expected = pd.DataFrame(outputs, index=mi)\n\n tm.assert_frame_equal(grouped, expected)\n\n\n@pytest.mark.arm_slow\n@pytest.mark.parametrize(\n "datetime1, datetime2",\n [\n (pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")),\n (pd.Timedelta("-2 days"), pd.Timedelta("-1 days")),\n (pd.Period("2020-01-01"), pd.Period("2020-02-01")),\n ],\n)\n@pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])])\ndef test_groupby_dropna_datetime_like_data(\n dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2\n):\n # 3729\n df = pd.DataFrame(\n {\n "values": [1, 2, 3, 4, 5, 6],\n "dt": [\n datetime1,\n unique_nulls_fixture,\n datetime2,\n unique_nulls_fixture2,\n datetime1,\n datetime1,\n ],\n }\n )\n\n if dropna:\n indexes = [datetime1, datetime2]\n else:\n indexes = [datetime1, datetime2, np.nan]\n\n grouped = df.groupby("dt", dropna=dropna).agg({"values": "sum"})\n expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))\n\n tm.assert_frame_equal(grouped, expected)\n\n\n@pytest.mark.parametrize(\n "dropna, data, selected_data, levels",\n [\n pytest.param(\n False,\n {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},\n {"values": [0, 1, 0, 0]},\n ["a", "b", np.nan],\n id="dropna_false_has_nan",\n ),\n pytest.param(\n True,\n {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},\n {"values": [0, 1, 0]},\n None,\n id="dropna_true_has_nan",\n ),\n pytest.param(\n # no nan in "groups"; dropna=True|False should be same.\n False,\n {"groups": ["a", "a", 
"b", "c"], "values": [10, 10, 20, 30]},\n {"values": [0, 1, 0, 0]},\n None,\n id="dropna_false_no_nan",\n ),\n pytest.param(\n # no nan in "groups"; dropna=True|False should be same.\n True,\n {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},\n {"values": [0, 1, 0, 0]},\n None,\n id="dropna_true_no_nan",\n ),\n ],\n)\ndef test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, levels):\n # GH 35889\n\n df = pd.DataFrame(data)\n gb = df.groupby("groups", dropna=dropna)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))}))\n\n mi_tuples = tuple(zip(data["groups"], selected_data["values"]))\n mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None])\n # Since right now, by default MI will drop NA from levels when we create MI\n # via `from_*`, so we need to add NA for level manually afterwards.\n if not dropna and levels:\n mi = mi.set_levels(levels, level="groups")\n\n expected = pd.DataFrame(selected_data, index=mi)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("input_index", [None, ["a"], ["a", "b"]])\n@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])\n@pytest.mark.parametrize("series", [True, False])\ndef test_groupby_dropna_with_multiindex_input(input_index, keys, series):\n # GH#46783\n obj = pd.DataFrame(\n {\n "a": [1, np.nan],\n "b": [1, 1],\n "c": [2, 3],\n }\n )\n\n expected = obj.set_index(keys)\n if series:\n expected = expected["c"]\n elif input_index == ["a", "b"] and keys == ["a"]:\n # Column b should not be aggregated\n expected = expected[["c"]]\n\n if input_index is not None:\n obj = obj.set_index(input_index)\n gb = obj.groupby(keys, dropna=False)\n if series:\n gb = gb["c"]\n result = gb.sum()\n\n tm.assert_equal(result, expected)\n\n\ndef test_groupby_nan_included():\n # GH 35646\n data = {"group": ["g1", np.nan, "g1", "g2", 
np.nan], "B": [0, 1, 2, 3, 4]}
    df = pd.DataFrame(data)
    grouped = df.groupby("group", dropna=False)
    # .indices maps each group label to the integer positions of its rows.
    result = grouped.indices
    dtype = np.intp
    expected = {
        "g1": np.array([0, 2], dtype=dtype),
        "g2": np.array([3], dtype=dtype),
        np.nan: np.array([1, 4], dtype=dtype),
    }
    # Compare values positionally: the np.nan key cannot be looked up by
    # equality (nan != nan), so key order + isnan are checked separately.
    for result_values, expected_values in zip(result.values(), expected.values()):
        tm.assert_numpy_array_equal(result_values, expected_values)
    assert np.isnan(list(result.keys())[2])
    assert list(result.keys())[0:2] == ["g1", "g2"]


def test_groupby_drop_nan_with_multi_index():
    # GH 39895: NaN in a MultiIndex level must survive dropna=False round-trip.
    df = pd.DataFrame([[np.nan, 0, 1]], columns=["a", "b", "c"])
    df = df.set_index(["a", "b"])
    result = df.groupby(["a", "b"], dropna=False).first()
    expected = df
    tm.assert_frame_equal(result, expected)


# sequence_index enumerates all strings made up of x, y, z of length 4
@pytest.mark.parametrize("sequence_index", range(3**4))
@pytest.mark.parametrize(
    "dtype",
    [
        None,
        "UInt8",
        "Int8",
        "UInt16",
        "Int16",
        "UInt32",
        "Int32",
        "UInt64",
        "Int64",
        "Float32",
        # NOTE(review): "Int64" below duplicates the entry above — redundant
        # parametrization; confirm whether a different dtype was intended.
        "Int64",
        "Float64",
        "category",
        "string",
        pytest.param(
            "string[pyarrow]",
            marks=pytest.mark.skipif(
                pa_version_under10p1, reason="pyarrow is not installed"
            ),
        ),
        "datetime64[ns]",
        "period[d]",
        "Sparse[float]",
    ],
)
@pytest.mark.parametrize("test_series", [True, False])
def test_no_sort_keep_na(sequence_index, dtype, test_series, as_index):
    # GH#46584, GH#48794

    # Convert sequence_index into a string sequence, e.g.
5 becomes "xxyz"\n # This sequence is used for the grouper.\n sequence = "".join(\n [{0: "x", 1: "y", 2: "z"}[sequence_index // (3**k) % 3] for k in range(4)]\n )\n\n # Unique values to use for grouper, depends on dtype\n if dtype in ("string", "string[pyarrow]"):\n uniques = {"x": "x", "y": "y", "z": pd.NA}\n elif dtype in ("datetime64[ns]", "period[d]"):\n uniques = {"x": "2016-01-01", "y": "2017-01-01", "z": pd.NA}\n else:\n uniques = {"x": 1, "y": 2, "z": np.nan}\n\n df = pd.DataFrame(\n {\n "key": pd.Series([uniques[label] for label in sequence], dtype=dtype),\n "a": [0, 1, 2, 3],\n }\n )\n gb = df.groupby("key", dropna=False, sort=False, as_index=as_index, observed=False)\n if test_series:\n gb = gb["a"]\n result = gb.sum()\n\n # Manually compute the groupby sum, use the labels "x", "y", and "z" to avoid\n # issues with hashing np.nan\n summed = {}\n for idx, label in enumerate(sequence):\n summed[label] = summed.get(label, 0) + idx\n if dtype == "category":\n index = pd.CategoricalIndex(\n [uniques[e] for e in summed],\n df["key"].cat.categories,\n name="key",\n )\n elif isinstance(dtype, str) and dtype.startswith("Sparse"):\n index = pd.Index(\n pd.array([uniques[label] for label in summed], dtype=dtype), name="key"\n )\n else:\n index = pd.Index([uniques[label] for label in summed], dtype=dtype, name="key")\n expected = pd.Series(summed.values(), index=index, name="a", dtype=None)\n if not test_series:\n expected = expected.to_frame()\n if not as_index:\n expected = expected.reset_index()\n if dtype is not None and dtype.startswith("Sparse"):\n expected["key"] = expected["key"].astype(dtype)\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("test_series", [True, False])\n@pytest.mark.parametrize("dtype", [object, None])\ndef test_null_is_null_for_dtype(\n sort, dtype, nulls_fixture, nulls_fixture2, test_series\n):\n # GH#48506 - groups should always result in using the null for the dtype\n df = pd.DataFrame({"a": [1, 2]})\n groups = 
pd.Series([nulls_fixture, nulls_fixture2], dtype=dtype)\n obj = df["a"] if test_series else df\n gb = obj.groupby(groups, dropna=False, sort=sort)\n result = gb.sum()\n index = pd.Index([na_value_for_dtype(groups.dtype)])\n expected = pd.DataFrame({"a": [3]}, index=index)\n if test_series:\n tm.assert_series_equal(result, expected["a"])\n else:\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])\ndef test_categorical_reducers(reduction_func, observed, sort, as_index, index_kind):\n # Ensure there is at least one null value by appending to the end\n values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None)\n df = pd.DataFrame(\n {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}\n )\n\n # Strategy: Compare to dropna=True by filling null values with a new code\n df_filled = df.copy()\n df_filled["x"] = pd.Categorical(values, categories=[1, 2, 3, 4]).fillna(4)\n\n if index_kind == "range":\n keys = ["x"]\n elif index_kind == "single":\n keys = ["x"]\n df = df.set_index("x")\n df_filled = df_filled.set_index("x")\n else:\n keys = ["x", "x2"]\n df["x2"] = df["x"]\n df = df.set_index(["x", "x2"])\n df_filled["x2"] = df_filled["x"]\n df_filled = df_filled.set_index(["x", "x2"])\n args = get_groupby_method_args(reduction_func, df)\n args_filled = get_groupby_method_args(reduction_func, df_filled)\n if reduction_func == "corrwith" and index_kind == "range":\n # Don't include the grouping columns so we can call reset_index\n args = (args[0].drop(columns=keys),)\n args_filled = (args_filled[0].drop(columns=keys),)\n\n gb_keepna = df.groupby(\n keys, dropna=False, observed=observed, sort=sort, as_index=as_index\n )\n\n if not observed and reduction_func in ["idxmin", "idxmax"]:\n with pytest.raises(\n ValueError, match="empty group due to unobserved categories"\n ):\n getattr(gb_keepna, reduction_func)(*args)\n return\n\n gb_filled = df_filled.groupby(keys, 
observed=observed, sort=sort, as_index=True)\n expected = getattr(gb_filled, reduction_func)(*args_filled).reset_index()\n expected["x"] = expected["x"].cat.remove_categories([4])\n if index_kind == "multi":\n expected["x2"] = expected["x2"].cat.remove_categories([4])\n if as_index:\n if index_kind == "multi":\n expected = expected.set_index(["x", "x2"])\n else:\n expected = expected.set_index("x")\n elif index_kind != "range" and reduction_func != "size":\n # size, unlike other methods, has the desired behavior in GH#49519\n expected = expected.drop(columns="x")\n if index_kind == "multi":\n expected = expected.drop(columns="x2")\n if reduction_func in ("idxmax", "idxmin") and index_kind != "range":\n # expected was computed with a RangeIndex; need to translate to index values\n values = expected["y"].values.tolist()\n if index_kind == "single":\n values = [np.nan if e == 4 else e for e in values]\n expected["y"] = pd.Categorical(values, categories=[1, 2, 3])\n else:\n values = [(np.nan, np.nan) if e == (4, 4) else e for e in values]\n expected["y"] = values\n if reduction_func == "size":\n # size, unlike other methods, has the desired behavior in GH#49519\n expected = expected.rename(columns={0: "size"})\n if as_index:\n expected = expected["size"].rename(None)\n\n if as_index or index_kind == "range" or reduction_func == "size":\n warn = None\n else:\n warn = FutureWarning\n msg = "A grouping .* was excluded from the result"\n with tm.assert_produces_warning(warn, match=msg):\n result = getattr(gb_keepna, reduction_func)(*args)\n\n # size will return a Series, others are DataFrame\n tm.assert_equal(result, expected)\n\n\ndef test_categorical_transformers(\n request, transformation_func, observed, sort, as_index\n):\n # GH#36327\n if transformation_func == "fillna":\n msg = "GH#49651 fillna may incorrectly reorders results when dropna=False"\n request.applymarker(pytest.mark.xfail(reason=msg, strict=False))\n\n values = 
np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None)\n df = pd.DataFrame(\n {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}\n )\n args = get_groupby_method_args(transformation_func, df)\n\n # Compute result for null group\n null_group_values = df[df["x"].isnull()]["y"]\n if transformation_func == "cumcount":\n null_group_data = list(range(len(null_group_values)))\n elif transformation_func == "ngroup":\n if sort:\n if observed:\n na_group = df["x"].nunique(dropna=False) - 1\n else:\n # TODO: Should this be 3?\n na_group = df["x"].nunique(dropna=False) - 1\n else:\n na_group = df.iloc[: null_group_values.index[0]]["x"].nunique()\n null_group_data = len(null_group_values) * [na_group]\n else:\n null_group_data = getattr(null_group_values, transformation_func)(*args)\n null_group_result = pd.DataFrame({"y": null_group_data})\n\n gb_keepna = df.groupby(\n "x", dropna=False, observed=observed, sort=sort, as_index=as_index\n )\n gb_dropna = df.groupby("x", dropna=True, observed=observed, sort=sort)\n\n msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated"\n if transformation_func == "pct_change":\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = getattr(gb_keepna, "pct_change")(*args)\n else:\n result = getattr(gb_keepna, transformation_func)(*args)\n expected = getattr(gb_dropna, transformation_func)(*args)\n\n for iloc, value in zip(\n df[df["x"].isnull()].index.tolist(), null_group_result.values.ravel()\n ):\n if expected.ndim == 1:\n expected.iloc[iloc] = value\n else:\n expected.iloc[iloc, 0] = value\n if transformation_func == "ngroup":\n expected[df["x"].notnull() & expected.ge(na_group)] += 1\n if transformation_func not in ("rank", "diff", "pct_change", "shift"):\n expected = expected.astype("int64")\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["head", "tail"])\ndef test_categorical_head_tail(method, observed, sort, as_index):\n # 
GH#36327\n values = np.random.default_rng(2).choice([1, 2, None], 30)\n df = pd.DataFrame(\n {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}\n )\n gb = df.groupby("x", dropna=False, observed=observed, sort=sort, as_index=as_index)\n result = getattr(gb, method)()\n\n if method == "tail":\n values = values[::-1]\n # Take the top 5 values from each group\n mask = (\n ((values == 1) & ((values == 1).cumsum() <= 5))\n | ((values == 2) & ((values == 2).cumsum() <= 5))\n # flake8 doesn't like the vectorized check for None, thinks we should use `is`\n | ((values == None) & ((values == None).cumsum() <= 5)) # noqa: E711\n )\n if method == "tail":\n mask = mask[::-1]\n expected = df[mask]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_agg():\n # GH#36327\n values = np.random.default_rng(2).choice([1, 2, None], 30)\n df = pd.DataFrame(\n {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}\n )\n gb = df.groupby("x", dropna=False, observed=False)\n result = gb.agg(lambda x: x.sum())\n expected = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_transform():\n # GH#36327\n values = np.random.default_rng(2).choice([1, 2, None], 30)\n df = pd.DataFrame(\n {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}\n )\n gb = df.groupby("x", dropna=False, observed=False)\n result = gb.transform(lambda x: x.sum())\n expected = gb.transform("sum")\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_groupby_dropna.py | test_groupby_dropna.py | Python | 23,509 | 0.95 | 0.114943 | 0.074135 | vue-tools | 136 | 2024-11-15T13:52:28.249729 | MIT | true | 812fb80efab5dc258e03244dcd105def |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.groupby import get_groupby_method_args\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"\n)\n\n\n@pytest.mark.parametrize(\n "obj",\n [\n tm.SubclassedDataFrame({"A": np.arange(0, 10)}),\n tm.SubclassedSeries(np.arange(0, 10), name="A"),\n ],\n)\ndef test_groupby_preserves_subclass(obj, groupby_func):\n # GH28330 -- preserve subclass through groupby operations\n\n if isinstance(obj, Series) and groupby_func in {"corrwith"}:\n pytest.skip(f"Not applicable for Series and {groupby_func}")\n\n grouped = obj.groupby(np.arange(0, 10))\n\n # Groups should preserve subclass type\n assert isinstance(grouped.get_group(0), type(obj))\n\n args = get_groupby_method_args(groupby_func, obj)\n\n warn = FutureWarning if groupby_func == "fillna" else None\n msg = f"{type(grouped).__name__}.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg, raise_on_extra_warnings=False):\n result1 = getattr(grouped, groupby_func)(*args)\n with tm.assert_produces_warning(warn, match=msg, raise_on_extra_warnings=False):\n result2 = grouped.agg(groupby_func, *args)\n\n # Reduction or transformation kernels should preserve type\n slices = {"ngroup", "cumcount", "size"}\n if isinstance(obj, DataFrame) and groupby_func in slices:\n assert isinstance(result1, tm.SubclassedSeries)\n else:\n assert isinstance(result1, type(obj))\n\n # Confirm .agg() groupby operations return same results\n if isinstance(result1, DataFrame):\n tm.assert_frame_equal(result1, result2)\n else:\n tm.assert_series_equal(result1, result2)\n\n\ndef test_groupby_preserves_metadata():\n # GH-37343\n custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]})\n assert "testattr" in custom_df._metadata\n custom_df.testattr = "hello"\n for _, 
group_df in custom_df.groupby("c"):\n assert group_df.testattr == "hello"\n\n # GH-45314\n def func(group):\n assert isinstance(group, tm.SubclassedDataFrame)\n assert hasattr(group, "testattr")\n assert group.testattr == "hello"\n return group.testattr\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(\n FutureWarning,\n match=msg,\n raise_on_extra_warnings=False,\n check_stacklevel=False,\n ):\n result = custom_df.groupby("c").apply(func)\n expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c"))\n tm.assert_series_equal(result, expected)\n\n result = custom_df.groupby("c").apply(func, include_groups=False)\n tm.assert_series_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/pull/56761\n result = custom_df.groupby("c")[["a", "b"]].apply(func)\n tm.assert_series_equal(result, expected)\n\n def func2(group):\n assert isinstance(group, tm.SubclassedSeries)\n assert hasattr(group, "testattr")\n return group.testattr\n\n custom_series = tm.SubclassedSeries([1, 2, 3])\n custom_series.testattr = "hello"\n result = custom_series.groupby(custom_df["c"]).apply(func2)\n tm.assert_series_equal(result, expected)\n result = custom_series.groupby(custom_df["c"]).agg(func2)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame])\ndef test_groupby_resample_preserves_subclass(obj):\n # GH28330 -- preserve subclass through groupby.resample()\n\n df = obj(\n {\n "Buyer": Series("Carl Carl Carl Carl Joe Carl".split(), dtype=object),\n "Quantity": [18, 3, 5, 1, 9, 3],\n "Date": [\n datetime(2013, 9, 1, 13, 0),\n datetime(2013, 9, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 3, 10, 0),\n datetime(2013, 12, 2, 12, 0),\n datetime(2013, 9, 2, 14, 0),\n ],\n }\n )\n df = df.set_index("Date")\n\n # Confirm groupby.resample() preserves dataframe type\n msg = "DataFrameGroupBy.resample operated on the grouping columns"\n 
with tm.assert_produces_warning(\n FutureWarning,\n match=msg,\n raise_on_extra_warnings=False,\n check_stacklevel=False,\n ):\n result = df.groupby("Buyer").resample("5D").sum()\n assert isinstance(result, obj)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_groupby_subclass.py | test_groupby_subclass.py | Python | 4,580 | 0.95 | 0.081481 | 0.081081 | react-lib | 787 | 2025-06-06T19:23:47.737139 | BSD-3-Clause | true | 5bb1f3a0d23282090d9ee400460fece9 |
"""\ntest where we are determining what we are grouping, or getting groups\n"""\nfrom datetime import (\n date,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n DataFrame,\n Grouper,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.groupby.grouper import Grouping\n\n# selection\n# --------------------------------\n\n\nclass TestSelection:\n def test_select_bad_cols(self):\n df = DataFrame([[1, 2]], columns=["A", "B"])\n g = df.groupby("A")\n with pytest.raises(KeyError, match="\"Columns not found: 'C'\""):\n g[["C"]]\n\n with pytest.raises(KeyError, match="^[^A]+$"):\n # A should not be referenced as a bad column...\n # will have to rethink regex if you change message!\n g[["A", "C"]]\n\n def test_groupby_duplicated_column_errormsg(self):\n # GH7511\n df = DataFrame(\n columns=["A", "B", "A", "C"], data=[range(4), range(2, 6), range(0, 8, 2)]\n )\n\n msg = "Grouper for 'A' not 1-dimensional"\n with pytest.raises(ValueError, match=msg):\n df.groupby("A")\n with pytest.raises(ValueError, match=msg):\n df.groupby(["A", "B"])\n\n grouped = df.groupby("B")\n c = grouped.count()\n assert c.columns.nlevels == 1\n assert c.columns.size == 3\n\n def test_column_select_via_attr(self, df):\n result = df.groupby("A").C.sum()\n expected = df.groupby("A")["C"].sum()\n tm.assert_series_equal(result, expected)\n\n df["mean"] = 1.5\n result = df.groupby("A").mean(numeric_only=True)\n expected = df.groupby("A")[["C", "D", "mean"]].agg("mean")\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_list_of_columns(self):\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n "E": np.random.default_rng(2).standard_normal(8),\n 
}\n )\n\n result = df.groupby("A")[["C", "D"]].mean()\n result2 = df.groupby("A")[df.columns[2:4]].mean()\n\n expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n def test_getitem_numeric_column_names(self):\n # GH #13731\n df = DataFrame(\n {\n 0: list("abcd") * 2,\n 2: np.random.default_rng(2).standard_normal(8),\n 4: np.random.default_rng(2).standard_normal(8),\n 6: np.random.default_rng(2).standard_normal(8),\n }\n )\n result = df.groupby(0)[df.columns[1:3]].mean()\n result2 = df.groupby(0)[[2, 4]].mean()\n\n expected = df.loc[:, [0, 2, 4]].groupby(0).mean()\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # per GH 23566 enforced deprecation raises a ValueError\n with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):\n df.groupby(0)[2, 4].mean()\n\n def test_getitem_single_tuple_of_columns_raises(self, df):\n # per GH 23566 enforced deprecation raises a ValueError\n with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):\n df.groupby("A")["C", "D"].mean()\n\n def test_getitem_single_column(self):\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n "E": np.random.default_rng(2).standard_normal(8),\n }\n )\n\n result = df.groupby("A")["C"].mean()\n\n as_frame = df.loc[:, ["A", "C"]].groupby("A").mean()\n as_series = as_frame.iloc[:, 0]\n expected = as_series\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "func", [lambda x: x.sum(), lambda x: x.agg(lambda y: y.sum())]\n )\n def test_getitem_from_grouper(self, func):\n # GH 50383\n df = DataFrame({"a": [1, 1, 2], "b": 3, "c": 4, "d": 5})\n gb = df.groupby(["a", "b"])[["a", "c"]]\n\n idx = 
MultiIndex.from_tuples([(1, 3), (2, 3)], names=["a", "b"])\n expected = DataFrame({"a": [2, 2], "c": [8, 4]}, index=idx)\n result = func(gb)\n\n tm.assert_frame_equal(result, expected)\n\n def test_indices_grouped_by_tuple_with_lambda(self):\n # GH 36158\n df = DataFrame(\n {\n "Tuples": (\n (x, y)\n for x in [0, 1]\n for y in np.random.default_rng(2).integers(3, 5, 5)\n )\n }\n )\n\n gb = df.groupby("Tuples")\n gb_lambda = df.groupby(lambda x: df.iloc[x, 0])\n\n expected = gb.indices\n result = gb_lambda.indices\n\n tm.assert_dict_equal(result, expected)\n\n\n# grouping\n# --------------------------------\n\n\nclass TestGrouping:\n @pytest.mark.parametrize(\n "index",\n [\n Index(list("abcde")),\n Index(np.arange(5)),\n Index(np.arange(5, dtype=float)),\n date_range("2020-01-01", periods=5),\n period_range("2020-01-01", periods=5),\n ],\n )\n def test_grouper_index_types(self, index):\n # related GH5375\n # groupby misbehaving when using a Floatlike index\n df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"), index=index)\n\n df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)\n\n df.index = df.index[::-1]\n df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)\n\n def test_grouper_multilevel_freq(self):\n # GH 7885\n # with level and freq specified in a Grouper\n d0 = date.today() - timedelta(days=14)\n dates = date_range(d0, date.today())\n date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"])\n df = DataFrame(np.random.default_rng(2).integers(0, 100, 225), index=date_index)\n\n # Check string level\n expected = (\n df.reset_index()\n .groupby([Grouper(key="foo", freq="W"), Grouper(key="bar", freq="W")])\n .sum()\n )\n # reset index changes columns dtype to object\n expected.columns = Index([0], dtype="int64")\n\n result = df.groupby(\n [Grouper(level="foo", freq="W"), Grouper(level="bar", freq="W")]\n ).sum()\n tm.assert_frame_equal(result, expected)\n\n # Check integer level\n result = df.groupby(\n 
[Grouper(level=0, freq="W"), Grouper(level=1, freq="W")]\n ).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_creation_bug(self):\n # GH 8795\n df = DataFrame({"A": [0, 0, 1, 1, 2, 2], "B": [1, 2, 3, 4, 5, 6]})\n g = df.groupby("A")\n expected = g.sum()\n\n g = df.groupby(Grouper(key="A"))\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n msg = "Grouper axis keyword is deprecated and will be removed"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gpr = Grouper(key="A", axis=0)\n g = df.groupby(gpr)\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = g.apply(lambda x: x.sum())\n expected["A"] = [0, 2, 4]\n expected = expected.loc[:, ["A", "B"]]\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_creation_bug2(self):\n # GH14334\n # Grouper(key=...) may be passed in a list\n df = DataFrame(\n {"A": [0, 0, 0, 1, 1, 1], "B": [1, 1, 2, 2, 3, 3], "C": [1, 2, 3, 4, 5, 6]}\n )\n # Group by single column\n expected = df.groupby("A").sum()\n g = df.groupby([Grouper(key="A")])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # Group by two columns\n # using a combination of strings and Grouper objects\n expected = df.groupby(["A", "B"]).sum()\n\n # Group with two Grouper objects\n g = df.groupby([Grouper(key="A"), Grouper(key="B")])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # Group with a string and a Grouper object\n g = df.groupby(["A", Grouper(key="B")])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # Group with a Grouper object and a string\n g = df.groupby([Grouper(key="A"), "B"])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_creation_bug3(self, unit):\n # GH8866\n dti = date_range("20130101", periods=2, unit=unit)\n mi = MultiIndex.from_product(\n [list("ab"), range(2), 
dti],\n names=["one", "two", "three"],\n )\n ser = Series(\n np.arange(8, dtype="int64"),\n index=mi,\n )\n result = ser.groupby(Grouper(level="three", freq="ME")).sum()\n exp_dti = pd.DatetimeIndex(\n [Timestamp("2013-01-31")], freq="ME", name="three"\n ).as_unit(unit)\n expected = Series(\n [28],\n index=exp_dti,\n )\n tm.assert_series_equal(result, expected)\n\n # just specifying a level breaks\n result = ser.groupby(Grouper(level="one")).sum()\n expected = ser.groupby(level="one").sum()\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("func", [False, True])\n def test_grouper_returning_tuples(self, func):\n # GH 22257 , both with dict and with callable\n df = DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})\n mapping = dict(zip(range(4), [("C", 5), ("D", 6)] * 2))\n\n if func:\n gb = df.groupby(by=lambda idx: mapping[idx], sort=False)\n else:\n gb = df.groupby(by=mapping, sort=False)\n\n name, expected = next(iter(gb))\n assert name == ("C", 5)\n result = gb.get_group(name)\n\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_column_and_index(self):\n # GH 14327\n\n # Grouping a multi-index frame by a column and an index level should\n # be equivalent to resetting the index and grouping by two columns\n idx = MultiIndex.from_tuples(\n [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)]\n )\n idx.names = ["outer", "inner"]\n df_multi = DataFrame(\n {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]},\n index=idx,\n )\n result = df_multi.groupby(["B", Grouper(level="inner")]).mean(numeric_only=True)\n expected = (\n df_multi.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)\n )\n tm.assert_frame_equal(result, expected)\n\n # Test the reverse grouping order\n result = df_multi.groupby([Grouper(level="inner"), "B"]).mean(numeric_only=True)\n expected = (\n df_multi.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)\n )\n tm.assert_frame_equal(result, expected)\n\n # 
Grouping a single-index frame by a column and the index should\n # be equivalent to resetting the index and grouping by two columns\n df_single = df_multi.reset_index("outer")\n result = df_single.groupby(["B", Grouper(level="inner")]).mean(\n numeric_only=True\n )\n expected = (\n df_single.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)\n )\n tm.assert_frame_equal(result, expected)\n\n # Test the reverse grouping order\n result = df_single.groupby([Grouper(level="inner"), "B"]).mean(\n numeric_only=True\n )\n expected = (\n df_single.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_levels_and_columns(self):\n # GH9344, GH9049\n idx_names = ["x", "y"]\n idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)\n df = DataFrame(np.arange(12).reshape(-1, 3), index=idx)\n\n by_levels = df.groupby(level=idx_names).mean()\n # reset_index changes columns dtype to object\n by_columns = df.reset_index().groupby(idx_names).mean()\n\n # without casting, by_columns.columns is object-dtype\n by_columns.columns = by_columns.columns.astype(np.int64)\n tm.assert_frame_equal(by_levels, by_columns)\n\n def test_groupby_categorical_index_and_columns(self, observed):\n # GH18432, adapted for GH25871\n columns = ["A", "B", "A", "B"]\n categories = ["B", "A"]\n data = np.array(\n [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int\n )\n cat_columns = CategoricalIndex(columns, categories=categories, ordered=True)\n df = DataFrame(data=data, columns=cat_columns)\n depr_msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = df.groupby(axis=1, level=0, observed=observed).sum()\n expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int)\n expected_columns = CategoricalIndex(\n categories, categories=categories, ordered=True\n )\n expected = 
DataFrame(data=expected_data, columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n # test transposed version\n df = DataFrame(data.T, index=cat_columns)\n msg = "The 'axis' keyword in DataFrame.groupby is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby(axis=0, level=0, observed=observed).sum()\n expected = DataFrame(data=expected_data.T, index=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_getting_correct_binner(self):\n # GH 10063\n # using a non-time-based grouper and a time-based grouper\n # and specifying levels\n df = DataFrame(\n {"A": 1},\n index=MultiIndex.from_product(\n [list("ab"), date_range("20130101", periods=80)], names=["one", "two"]\n ),\n )\n result = df.groupby(\n [Grouper(level="one"), Grouper(level="two", freq="ME")]\n ).sum()\n expected = DataFrame(\n {"A": [31, 28, 21, 31, 28, 21]},\n index=MultiIndex.from_product(\n [list("ab"), date_range("20130101", freq="ME", periods=3)],\n names=["one", "two"],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_iter(self, df):\n gb = df.groupby("A")\n msg = "DataFrameGroupBy.grouper is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouper = gb.grouper\n result = sorted(grouper)\n expected = ["bar", "foo"]\n assert result == expected\n\n def test_empty_groups(self, df):\n # see gh-1048\n with pytest.raises(ValueError, match="No group keys passed!"):\n df.groupby([])\n\n def test_groupby_grouper(self, df):\n grouped = df.groupby("A")\n msg = "DataFrameGroupBy.grouper is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouper = grouped.grouper\n result = df.groupby(grouper).mean(numeric_only=True)\n expected = grouped.mean(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_dict_mapping(self):\n # GH #679\n s = Series({"T1": 5})\n result = s.groupby({"T1": "T2"}).agg("sum")\n expected = 
s.groupby(["T2"]).agg("sum")\n tm.assert_series_equal(result, expected)\n\n s = Series([1.0, 2.0, 3.0, 4.0], index=list("abcd"))\n mapping = {"a": 0, "b": 0, "c": 1, "d": 1}\n\n result = s.groupby(mapping).mean()\n result2 = s.groupby(mapping).agg("mean")\n exp_key = np.array([0, 0, 1, 1], dtype=np.int64)\n expected = s.groupby(exp_key).mean()\n expected2 = s.groupby(exp_key).mean()\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result, result2)\n tm.assert_series_equal(result, expected2)\n\n @pytest.mark.parametrize(\n "index",\n [\n [0, 1, 2, 3],\n ["a", "b", "c", "d"],\n [Timestamp(2021, 7, 28 + i) for i in range(4)],\n ],\n )\n def test_groupby_series_named_with_tuple(self, frame_or_series, index):\n # GH 42731\n obj = frame_or_series([1, 2, 3, 4], index=index)\n groups = Series([1, 0, 1, 0], index=index, name=("a", "a"))\n result = obj.groupby(groups).last()\n expected = frame_or_series([4, 3])\n expected.index.name = ("a", "a")\n tm.assert_equal(result, expected)\n\n def test_groupby_grouper_f_sanity_checked(self):\n dates = date_range("01-Jan-2013", periods=12, freq="MS")\n ts = Series(np.random.default_rng(2).standard_normal(12), index=dates)\n\n # GH51979\n # simple check that the passed function doesn't operates on the whole index\n msg = "'Timestamp' object is not subscriptable"\n with pytest.raises(TypeError, match=msg):\n ts.groupby(lambda key: key[0:6])\n\n result = ts.groupby(lambda x: x).sum()\n expected = ts.groupby(ts.index).sum()\n expected.index.freq = None\n tm.assert_series_equal(result, expected)\n\n def test_groupby_with_datetime_key(self):\n # GH 51158\n df = DataFrame(\n {\n "id": ["a", "b"] * 3,\n "b": date_range("2000-01-01", "2000-01-03", freq="9h"),\n }\n )\n grouper = Grouper(key="b", freq="D")\n gb = df.groupby([grouper, "id"])\n\n # test number of groups\n expected = {\n (Timestamp("2000-01-01"), "a"): [0, 2],\n (Timestamp("2000-01-01"), "b"): [1],\n (Timestamp("2000-01-02"), "a"): [4],\n 
(Timestamp("2000-01-02"), "b"): [3, 5],\n }\n tm.assert_dict_equal(gb.groups, expected)\n\n # test number of group keys\n assert len(gb.groups.keys()) == 4\n\n def test_grouping_error_on_multidim_input(self, df):\n msg = "Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional"\n with pytest.raises(ValueError, match=msg):\n Grouping(df.index, df[["A", "A"]])\n\n def test_multiindex_passthru(self):\n # GH 7997\n # regression from 0.14.1\n df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n df.columns = MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])\n\n depr_msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n gb = df.groupby(axis=1, level=[0, 1])\n result = gb.first()\n tm.assert_frame_equal(result, df)\n\n def test_multiindex_negative_level(self, multiindex_dataframe_random_data):\n # GH 13901\n result = multiindex_dataframe_random_data.groupby(level=-1).sum()\n expected = multiindex_dataframe_random_data.groupby(level="second").sum()\n tm.assert_frame_equal(result, expected)\n\n result = multiindex_dataframe_random_data.groupby(level=-2).sum()\n expected = multiindex_dataframe_random_data.groupby(level="first").sum()\n tm.assert_frame_equal(result, expected)\n\n result = multiindex_dataframe_random_data.groupby(level=[-2, -1]).sum()\n expected = multiindex_dataframe_random_data.sort_index()\n tm.assert_frame_equal(result, expected)\n\n result = multiindex_dataframe_random_data.groupby(level=[-1, "first"]).sum()\n expected = multiindex_dataframe_random_data.groupby(\n level=["second", "first"]\n ).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_multifunc_select_col_integer_cols(self, df):\n df.columns = np.arange(len(df.columns))\n\n # it works!\n msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.groupby(1, as_index=False)[2].agg({"Q": np.mean})\n\n def 
test_multiindex_columns_empty_level(self):\n lst = [["count", "values"], ["to filter", ""]]\n midx = MultiIndex.from_tuples(lst)\n\n df = DataFrame([[1, "A"]], columns=midx)\n\n grouped = df.groupby("to filter").groups\n assert grouped["A"] == [0]\n\n grouped = df.groupby([("to filter", "")]).groups\n assert grouped["A"] == [0]\n\n df = DataFrame([[1, "A"], [2, "B"]], columns=midx)\n\n expected = df.groupby("to filter").groups\n result = df.groupby([("to filter", "")]).groups\n assert result == expected\n\n df = DataFrame([[1, "A"], [2, "A"]], columns=midx)\n\n expected = df.groupby("to filter").groups\n result = df.groupby([("to filter", "")]).groups\n tm.assert_dict_equal(result, expected)\n\n def test_groupby_multiindex_tuple(self):\n # GH 17979\n df = DataFrame(\n [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],\n columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),\n )\n expected = df.groupby([("b", 1)]).groups\n result = df.groupby(("b", 1)).groups\n tm.assert_dict_equal(expected, result)\n\n df2 = DataFrame(\n df.values,\n columns=MultiIndex.from_arrays(\n [["a", "b", "b", "c"], ["d", "d", "e", "e"]]\n ),\n )\n expected = df2.groupby([("b", "d")]).groups\n result = df.groupby(("b", 1)).groups\n tm.assert_dict_equal(expected, result)\n\n df3 = DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"])\n expected = df3.groupby([("b", "d")]).groups\n result = df.groupby(("b", 1)).groups\n tm.assert_dict_equal(expected, result)\n\n def test_groupby_multiindex_partial_indexing_equivalence(self):\n # GH 17977\n df = DataFrame(\n [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],\n columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),\n )\n\n expected_mean = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].mean()\n result_mean = df.groupby([("a", 1)])["b"].mean()\n tm.assert_frame_equal(expected_mean, result_mean)\n\n expected_sum = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].sum()\n result_sum = df.groupby([("a", 1)])["b"].sum()\n 
tm.assert_frame_equal(expected_sum, result_sum)\n\n expected_count = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].count()\n result_count = df.groupby([("a", 1)])["b"].count()\n tm.assert_frame_equal(expected_count, result_count)\n\n expected_min = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].min()\n result_min = df.groupby([("a", 1)])["b"].min()\n tm.assert_frame_equal(expected_min, result_min)\n\n expected_max = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].max()\n result_max = df.groupby([("a", 1)])["b"].max()\n tm.assert_frame_equal(expected_max, result_max)\n\n expected_groups = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].groups\n result_groups = df.groupby([("a", 1)])["b"].groups\n tm.assert_dict_equal(expected_groups, result_groups)\n\n @pytest.mark.parametrize("sort", [True, False])\n def test_groupby_level(self, sort, multiindex_dataframe_random_data, df):\n # GH 17537\n frame = multiindex_dataframe_random_data\n deleveled = frame.reset_index()\n\n result0 = frame.groupby(level=0, sort=sort).sum()\n result1 = frame.groupby(level=1, sort=sort).sum()\n\n expected0 = frame.groupby(deleveled["first"].values, sort=sort).sum()\n expected1 = frame.groupby(deleveled["second"].values, sort=sort).sum()\n\n expected0.index.name = "first"\n expected1.index.name = "second"\n\n assert result0.index.name == "first"\n assert result1.index.name == "second"\n\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n assert result0.index.name == frame.index.names[0]\n assert result1.index.name == frame.index.names[1]\n\n # groupby level name\n result0 = frame.groupby(level="first", sort=sort).sum()\n result1 = frame.groupby(level="second", sort=sort).sum()\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n\n # axis=1\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()\n result1 = 
frame.T.groupby(level=1, axis=1, sort=sort).sum()\n tm.assert_frame_equal(result0, expected0.T)\n tm.assert_frame_equal(result1, expected1.T)\n\n # raise exception for non-MultiIndex\n msg = "level > 0 or level < -1 only valid with MultiIndex"\n with pytest.raises(ValueError, match=msg):\n df.groupby(level=1)\n\n def test_groupby_level_index_names(self, axis):\n # GH4014 this used to raise ValueError since 'exp'>1 (in py2)\n df = DataFrame({"exp": ["A"] * 3 + ["B"] * 3, "var1": range(6)}).set_index(\n "exp"\n )\n if axis in (1, "columns"):\n df = df.T\n depr_msg = "DataFrame.groupby with axis=1 is deprecated"\n else:\n depr_msg = "The 'axis' keyword in DataFrame.groupby is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n df.groupby(level="exp", axis=axis)\n msg = f"level name foo is not the name of the {df._get_axis_name(axis)}"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n df.groupby(level="foo", axis=axis)\n\n @pytest.mark.parametrize("sort", [True, False])\n def test_groupby_level_with_nas(self, sort):\n # GH 17537\n index = MultiIndex(\n levels=[[1, 0], [0, 1, 2, 3]],\n codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],\n )\n\n # factorizing doesn't confuse things\n s = Series(np.arange(8.0), index=index)\n result = s.groupby(level=0, sort=sort).sum()\n expected = Series([6.0, 22.0], index=[0, 1])\n tm.assert_series_equal(result, expected)\n\n index = MultiIndex(\n levels=[[1, 0], [0, 1, 2, 3]],\n codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],\n )\n\n # factorizing doesn't confuse things\n s = Series(np.arange(8.0), index=index)\n result = s.groupby(level=0, sort=sort).sum()\n expected = Series([6.0, 18.0], index=[0.0, 1.0])\n tm.assert_series_equal(result, expected)\n\n def test_groupby_args(self, multiindex_dataframe_random_data):\n # PR8618 and issue 8015\n frame = multiindex_dataframe_random_data\n\n msg = "You have to supply one of 'by' 
and 'level'"\n with pytest.raises(TypeError, match=msg):\n frame.groupby()\n\n msg = "You have to supply one of 'by' and 'level'"\n with pytest.raises(TypeError, match=msg):\n frame.groupby(by=None, level=None)\n\n @pytest.mark.parametrize(\n "sort,labels",\n [\n [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],\n [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]],\n ],\n )\n def test_level_preserve_order(self, sort, labels, multiindex_dataframe_random_data):\n # GH 17537\n grouped = multiindex_dataframe_random_data.groupby(level=0, sort=sort)\n exp_labels = np.array(labels, np.intp)\n tm.assert_almost_equal(grouped._grouper.codes[0], exp_labels)\n\n def test_grouping_labels(self, multiindex_dataframe_random_data):\n grouped = multiindex_dataframe_random_data.groupby(\n multiindex_dataframe_random_data.index.get_level_values(0)\n )\n exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)\n tm.assert_almost_equal(grouped._grouper.codes[0], exp_labels)\n\n def test_list_grouper_with_nat(self):\n # GH 14715\n df = DataFrame({"date": date_range("1/1/2011", periods=365, freq="D")})\n df.iloc[-1] = pd.NaT\n grouper = Grouper(key="date", freq="YS")\n\n # Grouper in a list grouping\n result = df.groupby([grouper])\n expected = {Timestamp("2011-01-01"): Index(list(range(364)))}\n tm.assert_dict_equal(result.groups, expected)\n\n # Test case without a list\n result = df.groupby(grouper)\n expected = {Timestamp("2011-01-01"): 365}\n tm.assert_dict_equal(result.groups, expected)\n\n @pytest.mark.parametrize(\n "func,expected",\n [\n (\n "transform",\n Series(name=2, dtype=np.float64),\n ),\n (\n "agg",\n Series(\n name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)\n ),\n ),\n (\n "apply",\n Series(\n name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)\n ),\n ),\n ],\n )\n def test_evaluate_with_empty_groups(self, func, expected):\n # 26208\n # test transform'ing empty groups\n # (not testing other agg fns, because they return\n # different index 
objects.\n df = DataFrame({1: [], 2: []})\n g = df.groupby(1, group_keys=False)\n result = getattr(g[2], func)(lambda x: x)\n tm.assert_series_equal(result, expected)\n\n def test_groupby_empty(self):\n # https://github.com/pandas-dev/pandas/issues/27190\n s = Series([], name="name", dtype="float64")\n gr = s.groupby([])\n\n result = gr.mean()\n expected = s.set_axis(Index([], dtype=np.intp))\n tm.assert_series_equal(result, expected)\n\n # check group properties\n assert len(gr._grouper.groupings) == 1\n tm.assert_numpy_array_equal(\n gr._grouper.group_info[0], np.array([], dtype=np.dtype(np.intp))\n )\n\n tm.assert_numpy_array_equal(\n gr._grouper.group_info[1], np.array([], dtype=np.dtype(np.intp))\n )\n\n assert gr._grouper.group_info[2] == 0\n\n # check name\n gb = s.groupby(s)\n msg = "SeriesGroupBy.grouper is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouper = gb.grouper\n result = grouper.names\n expected = ["name"]\n assert result == expected\n\n def test_groupby_level_index_value_all_na(self):\n # issue 20519\n df = DataFrame(\n [["x", np.nan, 10], [None, np.nan, 20]], columns=["A", "B", "C"]\n ).set_index(["A", "B"])\n result = df.groupby(level=["A", "B"]).sum()\n expected = DataFrame(\n data=[],\n index=MultiIndex(\n levels=[Index(["x"], dtype="str"), Index([], dtype="float64")],\n codes=[[], []],\n names=["A", "B"],\n ),\n columns=["C"],\n dtype="int64",\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_multiindex_level_empty(self):\n # https://github.com/pandas-dev/pandas/issues/31670\n df = DataFrame(\n [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"]\n )\n df = df.set_index(["id", "category"])\n empty = df[df.value < 0]\n result = empty.groupby("id").sum()\n expected = DataFrame(\n dtype="float64",\n columns=["value"],\n index=Index([], dtype=np.int64, name="id"),\n )\n tm.assert_frame_equal(result, expected)\n\n\n# get_group\n# --------------------------------\n\n\nclass 
TestGetGroup:\n def test_get_group(self):\n # GH 5267\n # be datelike friendly\n df = DataFrame(\n {\n "DATE": pd.to_datetime(\n [\n "10-Oct-2013",\n "10-Oct-2013",\n "10-Oct-2013",\n "11-Oct-2013",\n "11-Oct-2013",\n "11-Oct-2013",\n ]\n ),\n "label": ["foo", "foo", "bar", "foo", "foo", "bar"],\n "VAL": [1, 2, 3, 4, 5, 6],\n }\n )\n\n g = df.groupby("DATE")\n key = next(iter(g.groups))\n result1 = g.get_group(key)\n result2 = g.get_group(Timestamp(key).to_pydatetime())\n result3 = g.get_group(str(Timestamp(key)))\n tm.assert_frame_equal(result1, result2)\n tm.assert_frame_equal(result1, result3)\n\n g = df.groupby(["DATE", "label"])\n\n key = next(iter(g.groups))\n result1 = g.get_group(key)\n result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1]))\n result3 = g.get_group((str(Timestamp(key[0])), key[1]))\n tm.assert_frame_equal(result1, result2)\n tm.assert_frame_equal(result1, result3)\n\n # must pass a same-length tuple with multiple keys\n msg = "must supply a tuple to get_group with multiple grouping keys"\n with pytest.raises(ValueError, match=msg):\n g.get_group("foo")\n with pytest.raises(ValueError, match=msg):\n g.get_group("foo")\n msg = "must supply a same-length tuple to get_group with multiple grouping keys"\n with pytest.raises(ValueError, match=msg):\n g.get_group(("foo", "bar", "baz"))\n\n def test_get_group_empty_bins(self, observed):\n d = DataFrame([3, 1, 7, 6])\n bins = [0, 5, 10, 15]\n g = d.groupby(pd.cut(d[0], bins), observed=observed)\n\n # TODO: should prob allow a str of Interval work as well\n # IOW '(0, 5]'\n result = g.get_group(pd.Interval(0, 5))\n expected = DataFrame([3, 1], index=[0, 1])\n tm.assert_frame_equal(result, expected)\n\n msg = r"Interval\(10, 15, closed='right'\)"\n with pytest.raises(KeyError, match=msg):\n g.get_group(pd.Interval(10, 15))\n\n def test_get_group_grouped_by_tuple(self):\n # GH 8121\n df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]], index=["ids"]).T\n gr = df.groupby("ids")\n expected = 
DataFrame({"ids": [(1,), (1,)]}, index=[0, 2])\n result = gr.get_group((1,))\n tm.assert_frame_equal(result, expected)\n\n dt = pd.to_datetime(["2010-01-01", "2010-01-02", "2010-01-01", "2010-01-02"])\n df = DataFrame({"ids": [(x,) for x in dt]})\n gr = df.groupby("ids")\n result = gr.get_group(("2010-01-01",))\n expected = DataFrame({"ids": [(dt[0],), (dt[0],)]}, index=[0, 2])\n tm.assert_frame_equal(result, expected)\n\n def test_get_group_grouped_by_tuple_with_lambda(self):\n # GH 36158\n df = DataFrame(\n {\n "Tuples": (\n (x, y)\n for x in [0, 1]\n for y in np.random.default_rng(2).integers(3, 5, 5)\n )\n }\n )\n\n gb = df.groupby("Tuples")\n gb_lambda = df.groupby(lambda x: df.iloc[x, 0])\n\n expected = gb.get_group(next(iter(gb.groups.keys())))\n result = gb_lambda.get_group(next(iter(gb_lambda.groups.keys())))\n\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_with_empty(self):\n index = pd.DatetimeIndex(())\n data = ()\n series = Series(data, index, dtype=object)\n grouper = Grouper(freq="D")\n grouped = series.groupby(grouper)\n assert next(iter(grouped), None) is None\n\n def test_groupby_with_single_column(self):\n df = DataFrame({"a": list("abssbab")})\n tm.assert_frame_equal(df.groupby("a").get_group("a"), df.iloc[[0, 5]])\n # GH 13530\n exp = DataFrame(\n index=Index(["a", "b", "s"], name="a"), columns=Index([], dtype="str")\n )\n tm.assert_frame_equal(df.groupby("a").count(), exp)\n tm.assert_frame_equal(df.groupby("a").sum(), exp)\n\n exp = df.iloc[[3, 4, 5]]\n tm.assert_frame_equal(df.groupby("a").nth(1), exp)\n\n def test_gb_key_len_equal_axis_len(self):\n # GH16843\n # test ensures that index and column keys are recognized correctly\n # when number of keys equals axis length of groupby\n df = DataFrame(\n [["foo", "bar", "B", 1], ["foo", "bar", "B", 2], ["foo", "baz", "C", 3]],\n columns=["first", "second", "third", "one"],\n )\n df = df.set_index(["first", "second"])\n df = df.groupby(["first", "second", "third"]).size()\n assert 
df.loc[("foo", "bar", "B")] == 2\n assert df.loc[("foo", "baz", "C")] == 1\n\n\n# groups & iteration\n# --------------------------------\n\n\nclass TestIteration:\n def test_groups(self, df):\n grouped = df.groupby(["A"])\n groups = grouped.groups\n assert groups is grouped.groups # caching works\n\n for k, v in grouped.groups.items():\n assert (df.loc[v]["A"] == k).all()\n\n grouped = df.groupby(["A", "B"])\n groups = grouped.groups\n assert groups is grouped.groups # caching works\n\n for k, v in grouped.groups.items():\n assert (df.loc[v]["A"] == k[0]).all()\n assert (df.loc[v]["B"] == k[1]).all()\n\n def test_grouping_is_iterable(self, tsframe):\n # this code path isn't used anywhere else\n # not sure it's useful\n grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])\n\n # test it works\n for g in grouped._grouper.groupings[0]:\n pass\n\n def test_multi_iter(self):\n s = Series(np.arange(6))\n k1 = np.array(["a", "a", "a", "b", "b", "b"])\n k2 = np.array(["1", "2", "1", "2", "1", "2"])\n\n grouped = s.groupby([k1, k2])\n\n iterated = list(grouped)\n expected = [\n ("a", "1", s[[0, 2]]),\n ("a", "2", s[[1]]),\n ("b", "1", s[[4]]),\n ("b", "2", s[[3, 5]]),\n ]\n for i, ((one, two), three) in enumerate(iterated):\n e1, e2, e3 = expected[i]\n assert e1 == one\n assert e2 == two\n tm.assert_series_equal(three, e3)\n\n def test_multi_iter_frame(self, three_group):\n k1 = np.array(["b", "b", "b", "a", "a", "a"])\n k2 = np.array(["1", "2", "1", "2", "1", "2"])\n df = DataFrame(\n {\n "v1": np.random.default_rng(2).standard_normal(6),\n "v2": np.random.default_rng(2).standard_normal(6),\n "k1": k1,\n "k2": k2,\n },\n index=["one", "two", "three", "four", "five", "six"],\n )\n\n grouped = df.groupby(["k1", "k2"])\n\n # things get sorted!\n iterated = list(grouped)\n idx = df.index\n expected = [\n ("a", "1", df.loc[idx[[4]]]),\n ("a", "2", df.loc[idx[[3, 5]]]),\n ("b", "1", df.loc[idx[[0, 2]]]),\n ("b", "2", df.loc[idx[[1]]]),\n ]\n for i, ((one, two), 
three) in enumerate(iterated):\n e1, e2, e3 = expected[i]\n assert e1 == one\n assert e2 == two\n tm.assert_frame_equal(three, e3)\n\n # don't iterate through groups with no data\n df["k1"] = np.array(["b", "b", "b", "a", "a", "a"])\n df["k2"] = np.array(["1", "1", "1", "2", "2", "2"])\n grouped = df.groupby(["k1", "k2"])\n # calling `dict` on a DataFrameGroupBy leads to a TypeError,\n # we need to use a dictionary comprehension here\n # pylint: disable-next=unnecessary-comprehension\n groups = {key: gp for key, gp in grouped} # noqa: C416\n assert len(groups) == 2\n\n # axis = 1\n three_levels = three_group.groupby(["A", "B", "C"]).mean()\n depr_msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n grouped = three_levels.T.groupby(axis=1, level=(1, 2))\n for key, group in grouped:\n pass\n\n def test_dictify(self, df):\n dict(iter(df.groupby("A")))\n dict(iter(df.groupby(["A", "B"])))\n dict(iter(df["C"].groupby(df["A"])))\n dict(iter(df["C"].groupby([df["A"], df["B"]])))\n dict(iter(df.groupby("A")["C"]))\n dict(iter(df.groupby(["A", "B"])["C"]))\n\n def test_groupby_with_small_elem(self):\n # GH 8542\n # length=2\n df = DataFrame(\n {"event": ["start", "start"], "change": [1234, 5678]},\n index=pd.DatetimeIndex(["2014-09-10", "2013-10-10"]),\n )\n grouped = df.groupby([Grouper(freq="ME"), "event"])\n assert len(grouped.groups) == 2\n assert grouped.ngroups == 2\n assert (Timestamp("2014-09-30"), "start") in grouped.groups\n assert (Timestamp("2013-10-31"), "start") in grouped.groups\n\n res = grouped.get_group((Timestamp("2014-09-30"), "start"))\n tm.assert_frame_equal(res, df.iloc[[0], :])\n res = grouped.get_group((Timestamp("2013-10-31"), "start"))\n tm.assert_frame_equal(res, df.iloc[[1], :])\n\n df = DataFrame(\n {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},\n index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-09-15"]),\n )\n grouped = 
df.groupby([Grouper(freq="ME"), "event"])\n assert len(grouped.groups) == 2\n assert grouped.ngroups == 2\n assert (Timestamp("2014-09-30"), "start") in grouped.groups\n assert (Timestamp("2013-10-31"), "start") in grouped.groups\n\n res = grouped.get_group((Timestamp("2014-09-30"), "start"))\n tm.assert_frame_equal(res, df.iloc[[0, 2], :])\n res = grouped.get_group((Timestamp("2013-10-31"), "start"))\n tm.assert_frame_equal(res, df.iloc[[1], :])\n\n # length=3\n df = DataFrame(\n {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},\n index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-08-05"]),\n )\n grouped = df.groupby([Grouper(freq="ME"), "event"])\n assert len(grouped.groups) == 3\n assert grouped.ngroups == 3\n assert (Timestamp("2014-09-30"), "start") in grouped.groups\n assert (Timestamp("2013-10-31"), "start") in grouped.groups\n assert (Timestamp("2014-08-31"), "start") in grouped.groups\n\n res = grouped.get_group((Timestamp("2014-09-30"), "start"))\n tm.assert_frame_equal(res, df.iloc[[0], :])\n res = grouped.get_group((Timestamp("2013-10-31"), "start"))\n tm.assert_frame_equal(res, df.iloc[[1], :])\n res = grouped.get_group((Timestamp("2014-08-31"), "start"))\n tm.assert_frame_equal(res, df.iloc[[2], :])\n\n def test_grouping_string_repr(self):\n # GH 13394\n mi = MultiIndex.from_arrays([list("AAB"), list("aba")])\n df = DataFrame([[1, 2, 3]], columns=mi)\n gr = df.groupby(df[("A", "a")])\n\n result = gr._grouper.groupings[0].__repr__()\n expected = "Grouping(('A', 'a'))"\n assert result == expected\n\n\ndef test_grouping_by_key_is_in_axis():\n # GH#50413 - Groupers specified by key are in-axis\n df = DataFrame({"a": [1, 1, 2], "b": [1, 1, 2], "c": [3, 4, 5]}).set_index("a")\n gb = df.groupby([Grouper(level="a"), Grouper(key="b")], as_index=False)\n assert not gb._grouper.groupings[0].in_axis\n assert gb._grouper.groupings[1].in_axis\n\n # Currently only in-axis groupings are including in the result when as_index=False;\n # This 
is likely to change in the future.\n msg = "A grouping .* was excluded from the result"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = gb.sum()\n expected = DataFrame({"b": [1, 2], "c": [7, 5]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_grouper_groups():\n # GH#51182 check Grouper.groups does not raise AttributeError\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n grper = Grouper(key="a")\n gb = df.groupby(grper)\n\n msg = "Use GroupBy.groups instead"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = grper.groups\n assert res is gb.groups\n\n msg = "Use GroupBy.grouper instead"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = grper.grouper\n assert res is gb._grouper\n\n msg = "Grouper.obj is deprecated and will be removed"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = grper.obj\n assert res is gb.obj\n\n msg = "Use Resampler.ax instead"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grper.ax\n\n msg = "Grouper.indexer is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grper.indexer\n\n\n@pytest.mark.parametrize("attr", ["group_index", "result_index", "group_arraylike"])\ndef test_depr_grouping_attrs(attr):\n # GH#56148\n df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})\n gb = df.groupby("a")\n msg = f"{attr} is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n getattr(gb._grouper.groupings[0], attr)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_grouping.py | test_grouping.py | Python | 45,896 | 0.95 | 0.070275 | 0.110471 | react-lib | 726 | 2024-04-09T04:58:58.603050 | GPL-3.0 | true | 83b08d4ecc997cbca20c3db9dc392d0d |
# Test GroupBy._positional_selector positional grouped indexing GH#42864\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "arg, expected_rows",\n [\n [0, [0, 1, 4]],\n [2, [5]],\n [5, []],\n [-1, [3, 4, 7]],\n [-2, [1, 6]],\n [-6, []],\n ],\n)\ndef test_int(slice_test_df, slice_test_grouped, arg, expected_rows):\n # Test single integer\n result = slice_test_grouped._positional_selector[arg]\n expected = slice_test_df.iloc[expected_rows]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_slice(slice_test_df, slice_test_grouped):\n # Test single slice\n result = slice_test_grouped._positional_selector[0:3:2]\n expected = slice_test_df.iloc[[0, 1, 4, 5]]\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "arg, expected_rows",\n [\n [[0, 2], [0, 1, 4, 5]],\n [[0, 2, -1], [0, 1, 3, 4, 5, 7]],\n [range(0, 3, 2), [0, 1, 4, 5]],\n [{0, 2}, [0, 1, 4, 5]],\n ],\n ids=[\n "list",\n "negative",\n "range",\n "set",\n ],\n)\ndef test_list(slice_test_df, slice_test_grouped, arg, expected_rows):\n # Test lists of integers and integer valued iterables\n result = slice_test_grouped._positional_selector[arg]\n expected = slice_test_df.iloc[expected_rows]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_ints(slice_test_df, slice_test_grouped):\n # Test tuple of ints\n result = slice_test_grouped._positional_selector[0, 2, -1]\n expected = slice_test_df.iloc[[0, 1, 3, 4, 5, 7]]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_slices(slice_test_df, slice_test_grouped):\n # Test tuple of slices\n result = slice_test_grouped._positional_selector[:2, -2:]\n expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_mix(slice_test_df, slice_test_grouped):\n # Test mixed tuple of ints and slices\n result = slice_test_grouped._positional_selector[0, 1, -2:]\n expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]\n\n 
tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "arg, expected_rows",\n [\n [0, [0, 1, 4]],\n [[0, 2, -1], [0, 1, 3, 4, 5, 7]],\n [(slice(None, 2), slice(-2, None)), [0, 1, 2, 3, 4, 6, 7]],\n ],\n)\ndef test_as_index(slice_test_df, arg, expected_rows):\n # Test the default as_index behaviour\n result = slice_test_df.groupby("Group", sort=False)._positional_selector[arg]\n expected = slice_test_df.iloc[expected_rows]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_doc_examples():\n # Test the examples in the documentation\n df = pd.DataFrame(\n [["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], columns=["A", "B"]\n )\n\n grouped = df.groupby("A", as_index=False)\n\n result = grouped._positional_selector[1:2]\n expected = pd.DataFrame([["a", 2], ["b", 5]], columns=["A", "B"], index=[1, 4])\n\n tm.assert_frame_equal(result, expected)\n\n result = grouped._positional_selector[1, -1]\n expected = pd.DataFrame(\n [["a", 2], ["a", 3], ["b", 5]], columns=["A", "B"], index=[1, 2, 4]\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.fixture()\ndef multiindex_data():\n rng = np.random.default_rng(2)\n ndates = 100\n nitems = 20\n dates = pd.date_range("20130101", periods=ndates, freq="D")\n items = [f"item {i}" for i in range(nitems)]\n\n data = {}\n for date in dates:\n nitems_for_date = nitems - rng.integers(0, 12)\n levels = [\n (item, rng.integers(0, 10000) / 100, rng.integers(0, 10000) / 100)\n for item in items[:nitems_for_date]\n ]\n levels.sort(key=lambda x: x[1])\n data[date] = levels\n\n return data\n\n\ndef _make_df_from_data(data):\n rows = {}\n for date in data:\n for level in data[date]:\n rows[(date, level[0])] = {"A": level[1], "B": level[2]}\n\n df = pd.DataFrame.from_dict(rows, orient="index")\n df.index.names = ("Date", "Item")\n return df\n\n\ndef test_multiindex(multiindex_data):\n # Test the multiindex mentioned as the use-case in the documentation\n df = _make_df_from_data(multiindex_data)\n result = 
df.groupby("Date", as_index=False).nth(slice(3, -3))\n\n sliced = {date: multiindex_data[date][3:-3] for date in multiindex_data}\n expected = _make_df_from_data(sliced)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("arg", [1, 5, 30, 1000, -1, -5, -30, -1000])\n@pytest.mark.parametrize("method", ["head", "tail"])\n@pytest.mark.parametrize("simulated", [True, False])\ndef test_against_head_and_tail(arg, method, simulated):\n # Test gives the same results as grouped head and tail\n n_groups = 100\n n_rows_per_group = 30\n\n data = {\n "group": [\n f"group {g}" for j in range(n_rows_per_group) for g in range(n_groups)\n ],\n "value": [\n f"group {g} row {j}"\n for j in range(n_rows_per_group)\n for g in range(n_groups)\n ],\n }\n df = pd.DataFrame(data)\n grouped = df.groupby("group", as_index=False)\n size = arg if arg >= 0 else n_rows_per_group + arg\n\n if method == "head":\n result = grouped._positional_selector[:arg]\n\n if simulated:\n indices = [\n j * n_groups + i\n for j in range(size)\n for i in range(n_groups)\n if j * n_groups + i < n_groups * n_rows_per_group\n ]\n expected = df.iloc[indices]\n\n else:\n expected = grouped.head(arg)\n\n else:\n result = grouped._positional_selector[-arg:]\n\n if simulated:\n indices = [\n (n_rows_per_group + j - size) * n_groups + i\n for j in range(size)\n for i in range(n_groups)\n if (n_rows_per_group + j - size) * n_groups + i >= 0\n ]\n expected = df.iloc[indices]\n\n else:\n expected = grouped.tail(arg)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("start", [None, 0, 1, 10, -1, -10])\n@pytest.mark.parametrize("stop", [None, 0, 1, 10, -1, -10])\n@pytest.mark.parametrize("step", [None, 1, 5])\ndef test_against_df_iloc(start, stop, step):\n # Test that a single group gives the same results as DataFrame.iloc\n n_rows = 30\n\n data = {\n "group": ["group 0"] * n_rows,\n "value": list(range(n_rows)),\n }\n df = pd.DataFrame(data)\n grouped = df.groupby("group", 
as_index=False)\n\n result = grouped._positional_selector[start:stop:step]\n expected = df.iloc[start:stop:step]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_series():\n # Test grouped Series\n ser = pd.Series([1, 2, 3, 4, 5], index=["a", "a", "a", "b", "b"])\n grouped = ser.groupby(level=0)\n result = grouped._positional_selector[1:2]\n expected = pd.Series([2, 5], index=["a", "b"])\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("step", [1, 2, 3, 4, 5])\ndef test_step(step):\n # Test slice with various step values\n data = [["x", f"x{i}"] for i in range(5)]\n data += [["y", f"y{i}"] for i in range(4)]\n data += [["z", f"z{i}"] for i in range(3)]\n df = pd.DataFrame(data, columns=["A", "B"])\n\n grouped = df.groupby("A", as_index=False)\n\n result = grouped._positional_selector[::step]\n\n data = [["x", f"x{i}"] for i in range(0, 5, step)]\n data += [["y", f"y{i}"] for i in range(0, 4, step)]\n data += [["z", f"z{i}"] for i in range(0, 3, step)]\n\n index = [0 + i for i in range(0, 5, step)]\n index += [5 + i for i in range(0, 4, step)]\n index += [9 + i for i in range(0, 3, step)]\n\n expected = pd.DataFrame(data, columns=["A", "B"], index=index)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.fixture()\ndef column_group_df():\n return pd.DataFrame(\n [[0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 0, 1, 0, 2]],\n columns=["A", "B", "C", "D", "E", "F", "G"],\n )\n\n\ndef test_column_axis(column_group_df):\n msg = "DataFrame.groupby with axis=1"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n g = column_group_df.groupby(column_group_df.iloc[1], axis=1)\n result = g._positional_selector[1:-1]\n expected = column_group_df.iloc[:, [1, 3]]\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_columns_on_iter():\n # GitHub issue #44821\n df = pd.DataFrame({k: range(10) for k in "ABC"})\n\n # Group-by and select columns\n cols = ["A", "B"]\n for _, dg in df.groupby(df.A < 4)[cols]:\n tm.assert_index_equal(dg.columns, 
pd.Index(cols))\n assert "C" not in dg.columns\n\n\n@pytest.mark.parametrize("func", [list, pd.Index, pd.Series, np.array])\ndef test_groupby_duplicated_columns(func):\n # GH#44924\n df = pd.DataFrame(\n {\n "A": [1, 2],\n "B": [3, 3],\n "C": ["G", "G"],\n }\n )\n result = df.groupby("C")[func(["A", "B", "A"])].mean()\n expected = pd.DataFrame(\n [[1.5, 3.0, 1.5]], columns=["A", "B", "A"], index=pd.Index(["G"], name="C")\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_get_nonexisting_groups():\n # GH#32492\n df = pd.DataFrame(\n data={\n "A": ["a1", "a2", None],\n "B": ["b1", "b2", "b1"],\n "val": [1, 2, 3],\n }\n )\n grps = df.groupby(by=["A", "B"])\n\n msg = "('a2', 'b1')"\n with pytest.raises(KeyError, match=msg):\n grps.get_group(("a2", "b1"))\n | .venv\Lib\site-packages\pandas\tests\groupby\test_indexing.py | test_indexing.py | Python | 9,521 | 0.95 | 0.153153 | 0.071146 | react-lib | 36 | 2024-10-31T20:45:02.684238 | GPL-3.0 | true | 27116eb2740462b212261f3e48fe270e |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\n@pytest.fixture(params=[["inner"], ["inner", "outer"]])\ndef frame(request):\n levels = request.param\n df = pd.DataFrame(\n {\n "outer": ["a", "a", "a", "b", "b", "b"],\n "inner": [1, 2, 3, 1, 2, 3],\n "A": np.arange(6),\n "B": ["one", "one", "two", "two", "one", "one"],\n }\n )\n if levels:\n df = df.set_index(levels)\n\n return df\n\n\n@pytest.fixture()\ndef series():\n df = pd.DataFrame(\n {\n "outer": ["a", "a", "a", "b", "b", "b"],\n "inner": [1, 2, 3, 1, 2, 3],\n "A": np.arange(6),\n "B": ["one", "one", "two", "two", "one", "one"],\n }\n )\n s = df.set_index(["outer", "inner", "B"])["A"]\n\n return s\n\n\n@pytest.mark.parametrize(\n "key_strs,groupers",\n [\n ("inner", pd.Grouper(level="inner")), # Index name\n (["inner"], [pd.Grouper(level="inner")]), # List of index name\n (["B", "inner"], ["B", pd.Grouper(level="inner")]), # Column and index\n (["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column\n ],\n)\ndef test_grouper_index_level_as_string(frame, key_strs, groupers):\n if "B" not in key_strs or "outer" in frame.columns:\n result = frame.groupby(key_strs).mean(numeric_only=True)\n expected = frame.groupby(groupers).mean(numeric_only=True)\n else:\n result = frame.groupby(key_strs).mean()\n expected = frame.groupby(groupers).mean()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "levels",\n [\n "inner",\n "outer",\n "B",\n ["inner"],\n ["outer"],\n ["B"],\n ["inner", "outer"],\n ["outer", "inner"],\n ["inner", "outer", "B"],\n ["B", "outer", "inner"],\n ],\n)\ndef test_grouper_index_level_as_string_series(series, levels):\n # Compute expected result\n if isinstance(levels, list):\n groupers = [pd.Grouper(level=lv) for lv in levels]\n else:\n groupers = pd.Grouper(level=levels)\n\n expected = series.groupby(groupers).mean()\n\n # Compute and check result\n result = series.groupby(levels).mean()\n 
tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_index_as_string.py | test_index_as_string.py | Python | 2,274 | 0.95 | 0.094118 | 0.027778 | react-lib | 677 | 2024-06-29T20:18:59.175566 | MIT | true | 20d2a9e2815849813a52b01b11171994 |
import numpy as np\nimport pytest\n\nfrom pandas._libs import groupby as libgroupby\nfrom pandas._libs.groupby import (\n group_cumprod,\n group_cumsum,\n group_mean,\n group_sum,\n group_var,\n)\n\nfrom pandas.core.dtypes.common import ensure_platform_int\n\nfrom pandas import isna\nimport pandas._testing as tm\n\n\nclass GroupVarTestMixin:\n def test_group_var_generic_1d(self):\n prng = np.random.default_rng(2)\n\n out = (np.nan * np.ones((5, 1))).astype(self.dtype)\n counts = np.zeros(5, dtype="int64")\n values = 10 * prng.random((15, 1)).astype(self.dtype)\n labels = np.tile(np.arange(5), (3,)).astype("intp")\n\n expected_out = (\n np.squeeze(values).reshape((5, 3), order="F").std(axis=1, ddof=1) ** 2\n )[:, np.newaxis]\n expected_counts = counts + 3\n\n self.algo(out, counts, values, labels)\n assert np.allclose(out, expected_out, self.rtol)\n tm.assert_numpy_array_equal(counts, expected_counts)\n\n def test_group_var_generic_1d_flat_labels(self):\n prng = np.random.default_rng(2)\n\n out = (np.nan * np.ones((1, 1))).astype(self.dtype)\n counts = np.zeros(1, dtype="int64")\n values = 10 * prng.random((5, 1)).astype(self.dtype)\n labels = np.zeros(5, dtype="intp")\n\n expected_out = np.array([[values.std(ddof=1) ** 2]])\n expected_counts = counts + 5\n\n self.algo(out, counts, values, labels)\n\n assert np.allclose(out, expected_out, self.rtol)\n tm.assert_numpy_array_equal(counts, expected_counts)\n\n def test_group_var_generic_2d_all_finite(self):\n prng = np.random.default_rng(2)\n\n out = (np.nan * np.ones((5, 2))).astype(self.dtype)\n counts = np.zeros(5, dtype="int64")\n values = 10 * prng.random((10, 2)).astype(self.dtype)\n labels = np.tile(np.arange(5), (2,)).astype("intp")\n\n expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2\n expected_counts = counts + 2\n\n self.algo(out, counts, values, labels)\n assert np.allclose(out, expected_out, self.rtol)\n tm.assert_numpy_array_equal(counts, expected_counts)\n\n def 
test_group_var_generic_2d_some_nan(self):\n prng = np.random.default_rng(2)\n\n out = (np.nan * np.ones((5, 2))).astype(self.dtype)\n counts = np.zeros(5, dtype="int64")\n values = 10 * prng.random((10, 2)).astype(self.dtype)\n values[:, 1] = np.nan\n labels = np.tile(np.arange(5), (2,)).astype("intp")\n\n expected_out = np.vstack(\n [\n values[:, 0].reshape(5, 2, order="F").std(ddof=1, axis=1) ** 2,\n np.nan * np.ones(5),\n ]\n ).T.astype(self.dtype)\n expected_counts = counts + 2\n\n self.algo(out, counts, values, labels)\n tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)\n tm.assert_numpy_array_equal(counts, expected_counts)\n\n def test_group_var_constant(self):\n # Regression test from GH 10448.\n\n out = np.array([[np.nan]], dtype=self.dtype)\n counts = np.array([0], dtype="int64")\n values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)\n labels = np.zeros(3, dtype="intp")\n\n self.algo(out, counts, values, labels)\n\n assert counts[0] == 3\n assert out[0, 0] >= 0\n tm.assert_almost_equal(out[0, 0], 0.0)\n\n\nclass TestGroupVarFloat64(GroupVarTestMixin):\n __test__ = True\n\n algo = staticmethod(group_var)\n dtype = np.float64\n rtol = 1e-5\n\n def test_group_var_large_inputs(self):\n prng = np.random.default_rng(2)\n\n out = np.array([[np.nan]], dtype=self.dtype)\n counts = np.array([0], dtype="int64")\n values = (prng.random(10**6) + 10**12).astype(self.dtype)\n values.shape = (10**6, 1)\n labels = np.zeros(10**6, dtype="intp")\n\n self.algo(out, counts, values, labels)\n\n assert counts[0] == 10**6\n tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)\n\n\nclass TestGroupVarFloat32(GroupVarTestMixin):\n __test__ = True\n\n algo = staticmethod(group_var)\n dtype = np.float32\n rtol = 1e-2\n\n\n@pytest.mark.parametrize("dtype", ["float32", "float64"])\ndef test_group_ohlc(dtype):\n obj = np.array(np.random.default_rng(2).standard_normal(20), dtype=dtype)\n\n bins = np.array([6, 12, 20])\n out = np.zeros((3, 4), dtype)\n counts = 
np.zeros(len(out), dtype=np.int64)\n labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))\n\n func = libgroupby.group_ohlc\n func(out, counts, obj[:, None], labels)\n\n def _ohlc(group):\n if isna(group).all():\n return np.repeat(np.nan, 4)\n return [group[0], group.max(), group.min(), group[-1]]\n\n expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])\n\n tm.assert_almost_equal(out, expected)\n tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))\n\n obj[:6] = np.nan\n func(out, counts, obj[:, None], labels)\n expected[0] = np.nan\n tm.assert_almost_equal(out, expected)\n\n\ndef _check_cython_group_transform_cumulative(pd_op, np_op, dtype):\n """\n Check a group transform that executes a cumulative function.\n\n Parameters\n ----------\n pd_op : callable\n The pandas cumulative function.\n np_op : callable\n The analogous one in NumPy.\n dtype : type\n The specified dtype of the data.\n """\n is_datetimelike = False\n\n data = np.array([[1], [2], [3], [4]], dtype=dtype)\n answer = np.zeros_like(data)\n\n labels = np.array([0, 0, 0, 0], dtype=np.intp)\n ngroups = 1\n pd_op(answer, data, labels, ngroups, is_datetimelike)\n\n tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)\n\n\n@pytest.mark.parametrize("np_dtype", ["int64", "uint64", "float32", "float64"])\ndef test_cython_group_transform_cumsum(np_dtype):\n # see gh-4095\n dtype = np.dtype(np_dtype).type\n pd_op, np_op = group_cumsum, np.cumsum\n _check_cython_group_transform_cumulative(pd_op, np_op, dtype)\n\n\ndef test_cython_group_transform_cumprod():\n # see gh-4095\n dtype = np.float64\n pd_op, np_op = group_cumprod, np.cumprod\n _check_cython_group_transform_cumulative(pd_op, np_op, dtype)\n\n\ndef test_cython_group_transform_algos():\n # see gh-4095\n is_datetimelike = False\n\n # with nans\n labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)\n ngroups = 1\n\n data = np.array([[1], [2], [3], [np.nan], [4]], 
dtype="float64")\n actual = np.zeros_like(data)\n actual.fill(np.nan)\n group_cumprod(actual, data, labels, ngroups, is_datetimelike)\n expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")\n tm.assert_numpy_array_equal(actual[:, 0], expected)\n\n actual = np.zeros_like(data)\n actual.fill(np.nan)\n group_cumsum(actual, data, labels, ngroups, is_datetimelike)\n expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")\n tm.assert_numpy_array_equal(actual[:, 0], expected)\n\n # timedelta\n is_datetimelike = True\n data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]\n actual = np.zeros_like(data, dtype="int64")\n group_cumsum(actual, data.view("int64"), labels, ngroups, is_datetimelike)\n expected = np.array(\n [\n np.timedelta64(1, "ns"),\n np.timedelta64(2, "ns"),\n np.timedelta64(3, "ns"),\n np.timedelta64(4, "ns"),\n np.timedelta64(5, "ns"),\n ]\n )\n tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)\n\n\ndef test_cython_group_mean_datetimelike():\n actual = np.zeros(shape=(1, 1), dtype="float64")\n counts = np.array([0], dtype="int64")\n data = (\n np.array(\n [np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],\n dtype="m8[ns]",\n )[:, None]\n .view("int64")\n .astype("float64")\n )\n labels = np.zeros(len(data), dtype=np.intp)\n\n group_mean(actual, counts, data, labels, is_datetimelike=True)\n\n tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64"))\n\n\ndef test_cython_group_mean_wrong_min_count():\n actual = np.zeros(shape=(1, 1), dtype="float64")\n counts = np.zeros(1, dtype="int64")\n data = np.zeros(1, dtype="float64")[:, None]\n labels = np.zeros(1, dtype=np.intp)\n\n with pytest.raises(AssertionError, match="min_count"):\n group_mean(actual, counts, data, labels, is_datetimelike=True, min_count=0)\n\n\ndef test_cython_group_mean_not_datetimelike_but_has_NaT_values():\n actual = np.zeros(shape=(1, 1), dtype="float64")\n counts = np.array([0], dtype="int64")\n data = 
(\n np.array(\n [np.timedelta64("NaT"), np.timedelta64("NaT")],\n dtype="m8[ns]",\n )[:, None]\n .view("int64")\n .astype("float64")\n )\n labels = np.zeros(len(data), dtype=np.intp)\n\n group_mean(actual, counts, data, labels, is_datetimelike=False)\n\n tm.assert_numpy_array_equal(\n actual[:, 0], np.array(np.divide(np.add(data[0], data[1]), 2), dtype="float64")\n )\n\n\ndef test_cython_group_mean_Inf_at_begining_and_end():\n # GH 50367\n actual = np.array([[np.nan, np.nan], [np.nan, np.nan]], dtype="float64")\n counts = np.array([0, 0], dtype="int64")\n data = np.array(\n [[np.inf, 1.0], [1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0], [5, np.inf]],\n dtype="float64",\n )\n labels = np.array([0, 1, 0, 1, 0, 1], dtype=np.intp)\n\n group_mean(actual, counts, data, labels, is_datetimelike=False)\n\n expected = np.array([[np.inf, 3], [3, np.inf]], dtype="float64")\n\n tm.assert_numpy_array_equal(\n actual,\n expected,\n )\n\n\n@pytest.mark.parametrize(\n "values, out",\n [\n ([[np.inf], [np.inf], [np.inf]], [[np.inf], [np.inf]]),\n ([[np.inf], [np.inf], [-np.inf]], [[np.inf], [np.nan]]),\n ([[np.inf], [-np.inf], [np.inf]], [[np.inf], [np.nan]]),\n ([[np.inf], [-np.inf], [-np.inf]], [[np.inf], [-np.inf]]),\n ],\n)\ndef test_cython_group_sum_Inf_at_begining_and_end(values, out):\n # GH #53606\n actual = np.array([[np.nan], [np.nan]], dtype="float64")\n counts = np.array([0, 0], dtype="int64")\n data = np.array(values, dtype="float64")\n labels = np.array([0, 1, 1], dtype=np.intp)\n\n group_sum(actual, counts, data, labels, None, is_datetimelike=False)\n\n expected = np.array(out, dtype="float64")\n\n tm.assert_numpy_array_equal(\n actual,\n expected,\n )\n | .venv\Lib\site-packages\pandas\tests\groupby\test_libgroupby.py | test_libgroupby.py | Python | 10,457 | 0.95 | 0.069486 | 0.031873 | awesome-app | 304 | 2025-03-31T14:44:36.872311 | BSD-3-Clause | true | e2b526453f25cdfa3c4375894bb0440f |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n date_range,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("func", ["ffill", "bfill"])\ndef test_groupby_column_index_name_lost_fill_funcs(func):\n # GH: 29764 groupby loses index sometimes\n df = DataFrame(\n [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]],\n columns=Index(["type", "a", "b"], name="idx"),\n )\n df_grouped = df.groupby(["type"])[["a", "b"]]\n result = getattr(df_grouped, func)().columns\n expected = Index(["a", "b"], name="idx")\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize("func", ["ffill", "bfill"])\ndef test_groupby_fill_duplicate_column_names(func):\n # GH: 25610 ValueError with duplicate column names\n df1 = DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]})\n df2 = DataFrame({"field1": [1, np.nan, 4]})\n df_grouped = pd.concat([df1, df2], axis=1).groupby(by=["field2"])\n expected = DataFrame(\n [[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"]\n )\n result = getattr(df_grouped, func)()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_ffill_missing_arguments():\n # GH 14955\n df = DataFrame({"a": [1, 2], "b": [1, 1]})\n msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with pytest.raises(ValueError, match="Must specify a fill"):\n df.groupby("b").fillna()\n\n\n@pytest.mark.parametrize(\n "method, expected", [("ffill", [None, "a", "a"]), ("bfill", ["a", "a", None])]\n)\ndef test_fillna_with_string_dtype(method, expected):\n # GH 40250\n df = DataFrame({"a": pd.array([None, "a", None], dtype="string"), "b": [0, 0, 0]})\n grp = df.groupby("b")\n msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = grp.fillna(method=method)\n expected = DataFrame({"a": pd.array(expected, dtype="string")})\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_fill_consistency():\n # GH9221\n # pass thru keyword arguments to the generated wrapper\n # are set if the passed kw is None (only)\n df = DataFrame(\n index=pd.MultiIndex.from_product(\n [["value1", "value2"], date_range("2014-01-01", "2014-01-06")]\n ),\n columns=Index(["1", "2"], name="id"),\n )\n df["1"] = [\n np.nan,\n 1,\n np.nan,\n np.nan,\n 11,\n np.nan,\n np.nan,\n 2,\n np.nan,\n np.nan,\n 22,\n np.nan,\n ]\n df["2"] = [\n np.nan,\n 3,\n np.nan,\n np.nan,\n 33,\n np.nan,\n np.nan,\n 4,\n np.nan,\n np.nan,\n 44,\n np.nan,\n ]\n\n msg = "The 'axis' keyword in DataFrame.groupby is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = df.groupby(level=0, axis=0).fillna(method="ffill")\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["ffill", "bfill"])\n@pytest.mark.parametrize("dropna", [True, False])\n@pytest.mark.parametrize("has_nan_group", [True, False])\ndef test_ffill_handles_nan_groups(dropna, method, has_nan_group):\n # GH 34725\n\n df_without_nan_rows = DataFrame([(1, 0.1), (2, 0.2)])\n\n ridx = [-1, 0, -1, -1, 1, -1]\n df = df_without_nan_rows.reindex(ridx).reset_index(drop=True)\n\n group_b = np.nan if has_nan_group else "b"\n df["group_col"] = pd.Series(["a"] * 3 + [group_b] * 3)\n\n grouped = df.groupby(by="group_col", dropna=dropna)\n result = getattr(grouped, method)(limit=None)\n\n expected_rows = {\n ("ffill", True, True): [-1, 0, 0, -1, -1, -1],\n ("ffill", True, False): [-1, 0, 0, -1, 1, 1],\n ("ffill", False, True): [-1, 0, 0, -1, 1, 1],\n ("ffill", False, False): [-1, 0, 0, -1, 1, 1],\n ("bfill", True, True): [0, 0, -1, -1, -1, -1],\n ("bfill", True, False): [0, 0, -1, 1, 1, -1],\n ("bfill", False, True): [0, 0, -1, 1, 1, -1],\n ("bfill", False, False): [0, 0, -1, 1, 1, -1],\n }\n\n ridx = 
expected_rows.get((method, dropna, has_nan_group))\n expected = df_without_nan_rows.reindex(ridx).reset_index(drop=True)\n # columns are a 'take' on df.columns, which are object dtype\n expected.columns = expected.columns.astype(object)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("min_count, value", [(2, np.nan), (-1, 1.0)])\n@pytest.mark.parametrize("func", ["first", "last", "max", "min"])\ndef test_min_count(func, min_count, value):\n # GH#37821\n df = DataFrame({"a": [1] * 3, "b": [1, np.nan, np.nan], "c": [np.nan] * 3})\n result = getattr(df.groupby("a"), func)(min_count=min_count)\n expected = DataFrame({"b": [value], "c": [np.nan]}, index=Index([1], name="a"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_indices_with_missing():\n # GH 9304\n df = DataFrame({"a": [1, 1, np.nan], "b": [2, 3, 4], "c": [5, 6, 7]})\n g = df.groupby(["a", "b"])\n result = g.indices\n expected = {(1.0, 2): np.array([0]), (1.0, 3): np.array([1])}\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\groupby\test_missing.py | test_missing.py | Python | 5,358 | 0.95 | 0.06135 | 0.080292 | awesome-app | 344 | 2024-02-11T18:54:26.490033 | Apache-2.0 | true | 464d11b619a10653fd49036486c11635 |
import pytest\n\nfrom pandas.compat import is_platform_arm\n\nfrom pandas import (\n DataFrame,\n Series,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\npytestmark = [pytest.mark.single_cpu]\n\nnumba = pytest.importorskip("numba")\npytestmark.append(\n pytest.mark.skipif(\n Version(numba.__version__) == Version("0.61") and is_platform_arm(),\n reason=f"Segfaults on ARM platforms with numba {numba.__version__}",\n )\n)\n\n\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\nclass TestEngine:\n def test_cython_vs_numba_frame(\n self, sort, nogil, parallel, nopython, numba_supported_reductions\n ):\n func, kwargs = numba_supported_reductions\n df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n gb = df.groupby("a", sort=sort)\n result = getattr(gb, func)(\n engine="numba", engine_kwargs=engine_kwargs, **kwargs\n )\n expected = getattr(gb, func)(**kwargs)\n tm.assert_frame_equal(result, expected)\n\n def test_cython_vs_numba_getitem(\n self, sort, nogil, parallel, nopython, numba_supported_reductions\n ):\n func, kwargs = numba_supported_reductions\n df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n gb = df.groupby("a", sort=sort)["c"]\n result = getattr(gb, func)(\n engine="numba", engine_kwargs=engine_kwargs, **kwargs\n )\n expected = getattr(gb, func)(**kwargs)\n tm.assert_series_equal(result, expected)\n\n def test_cython_vs_numba_series(\n self, sort, nogil, parallel, nopython, numba_supported_reductions\n ):\n func, kwargs = numba_supported_reductions\n ser = Series(range(3), index=[1, 2, 1], name="foo")\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n gb = ser.groupby(level=0, sort=sort)\n result = getattr(gb, 
func)(\n engine="numba", engine_kwargs=engine_kwargs, **kwargs\n )\n expected = getattr(gb, func)(**kwargs)\n tm.assert_series_equal(result, expected)\n\n def test_as_index_false_unsupported(self, numba_supported_reductions):\n func, kwargs = numba_supported_reductions\n df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})\n gb = df.groupby("a", as_index=False)\n with pytest.raises(NotImplementedError, match="as_index=False"):\n getattr(gb, func)(engine="numba", **kwargs)\n\n def test_axis_1_unsupported(self, numba_supported_reductions):\n func, kwargs = numba_supported_reductions\n df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})\n gb = df.groupby("a", axis=1)\n with pytest.raises(NotImplementedError, match="axis=1"):\n getattr(gb, func)(engine="numba", **kwargs)\n\n def test_no_engine_doesnt_raise(self):\n # GH55520\n df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})\n gb = df.groupby("a")\n # Make sure behavior of functions w/out engine argument don't raise\n # when the global use_numba option is set\n with option_context("compute.use_numba", True):\n res = gb.agg({"b": "first"})\n expected = gb.agg({"b": "first"})\n tm.assert_frame_equal(res, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_numba.py | test_numba.py | Python | 3,558 | 0.95 | 0.089888 | 0.051282 | vue-tools | 914 | 2023-08-16T11:49:20.628813 | BSD-3-Clause | true | 521a6b5e203cefd100a30376fe02e052 |
import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import lib\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.groupby import get_groupby_method_args\n\n\nclass TestNumericOnly:\n # make sure that we are passing thru kwargs to our agg functions\n\n @pytest.fixture\n def df(self):\n # GH3668\n # GH5724\n df = DataFrame(\n {\n "group": [1, 1, 2],\n "int": [1, 2, 3],\n "float": [4.0, 5.0, 6.0],\n "string": Series(["a", "b", "c"], dtype="str"),\n "object": Series(["a", "b", "c"], dtype=object),\n "category_string": Series(list("abc")).astype("category"),\n "category_int": [7, 8, 9],\n "datetime": date_range("20130101", periods=3),\n "datetimetz": date_range("20130101", periods=3, tz="US/Eastern"),\n "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),\n },\n columns=[\n "group",\n "int",\n "float",\n "string",\n "object",\n "category_string",\n "category_int",\n "datetime",\n "datetimetz",\n "timedelta",\n ],\n )\n return df\n\n @pytest.mark.parametrize("method", ["mean", "median"])\n def test_averages(self, df, method):\n # mean / median\n expected_columns_numeric = Index(["int", "float", "category_int"])\n\n gb = df.groupby("group")\n expected = DataFrame(\n {\n "category_int": [7.5, 9],\n "float": [4.5, 6.0],\n "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],\n "int": [1.5, 3],\n "datetime": [\n Timestamp("2013-01-01 12:00:00"),\n Timestamp("2013-01-03 00:00:00"),\n ],\n "datetimetz": [\n Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),\n Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),\n ],\n },\n index=Index([1, 2], name="group"),\n columns=[\n "int",\n "float",\n "category_int",\n ],\n )\n\n result = getattr(gb, method)(numeric_only=True)\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n expected_columns = expected.columns\n\n self._check(df, method, expected_columns, expected_columns_numeric)\n\n 
@pytest.mark.parametrize("method", ["min", "max"])\n def test_extrema(self, df, method):\n # TODO: min, max *should* handle\n # categorical (ordered) dtype\n\n expected_columns = Index(\n [\n "int",\n "float",\n "string",\n "category_int",\n "datetime",\n "datetimetz",\n "timedelta",\n ]\n )\n expected_columns_numeric = expected_columns\n\n self._check(df, method, expected_columns, expected_columns_numeric)\n\n @pytest.mark.parametrize("method", ["first", "last"])\n def test_first_last(self, df, method):\n expected_columns = Index(\n [\n "int",\n "float",\n "string",\n "object",\n "category_string",\n "category_int",\n "datetime",\n "datetimetz",\n "timedelta",\n ]\n )\n expected_columns_numeric = expected_columns\n\n self._check(df, method, expected_columns, expected_columns_numeric)\n\n @pytest.mark.parametrize("method", ["sum", "cumsum"])\n def test_sum_cumsum(self, df, method):\n expected_columns_numeric = Index(["int", "float", "category_int"])\n expected_columns = Index(\n ["int", "float", "string", "category_int", "timedelta"]\n )\n if method == "cumsum":\n # cumsum loses string\n expected_columns = Index(["int", "float", "category_int", "timedelta"])\n\n self._check(df, method, expected_columns, expected_columns_numeric)\n\n @pytest.mark.parametrize("method", ["prod", "cumprod"])\n def test_prod_cumprod(self, df, method):\n expected_columns = Index(["int", "float", "category_int"])\n expected_columns_numeric = expected_columns\n\n self._check(df, method, expected_columns, expected_columns_numeric)\n\n @pytest.mark.parametrize("method", ["cummin", "cummax"])\n def test_cummin_cummax(self, df, method):\n # like min, max, but don't include strings\n expected_columns = Index(\n ["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]\n )\n\n # GH#15561: numeric_only=False set by default like min/max\n expected_columns_numeric = expected_columns\n\n self._check(df, method, expected_columns, expected_columns_numeric)\n\n def _check(self, df, 
method, expected_columns, expected_columns_numeric):\n gb = df.groupby("group")\n\n # object dtypes for transformations are not implemented in Cython and\n # have no Python fallback\n exception = (\n (NotImplementedError, TypeError) if method.startswith("cum") else TypeError\n )\n\n if method in ("min", "max", "cummin", "cummax", "cumsum", "cumprod"):\n # The methods default to numeric_only=False and raise TypeError\n msg = "|".join(\n [\n "Categorical is not ordered",\n f"Cannot perform {method} with non-ordered Categorical",\n re.escape(f"agg function failed [how->{method},dtype->object]"),\n # cumsum/cummin/cummax/cumprod\n "function is not implemented for this dtype",\n f"dtype 'str' does not support operation '{method}'",\n ]\n )\n with pytest.raises(exception, match=msg):\n getattr(gb, method)()\n elif method in ("sum", "mean", "median", "prod"):\n msg = "|".join(\n [\n "category type does not support sum operations",\n re.escape(f"agg function failed [how->{method},dtype->object]"),\n re.escape(f"agg function failed [how->{method},dtype->string]"),\n f"dtype 'str' does not support operation '{method}'",\n ]\n )\n with pytest.raises(exception, match=msg):\n getattr(gb, method)()\n else:\n result = getattr(gb, method)()\n tm.assert_index_equal(result.columns, expected_columns_numeric)\n\n if method not in ("first", "last"):\n msg = "|".join(\n [\n "Categorical is not ordered",\n "category type does not support",\n "function is not implemented for this dtype",\n f"Cannot perform {method} with non-ordered Categorical",\n re.escape(f"agg function failed [how->{method},dtype->object]"),\n re.escape(f"agg function failed [how->{method},dtype->string]"),\n f"dtype 'str' does not support operation '{method}'",\n ]\n )\n with pytest.raises(exception, match=msg):\n getattr(gb, method)(numeric_only=False)\n else:\n result = getattr(gb, method)(numeric_only=False)\n tm.assert_index_equal(result.columns, expected_columns)\n\n\n@pytest.mark.parametrize("numeric_only", 
[True, False, None])\ndef test_axis1_numeric_only(request, groupby_func, numeric_only, using_infer_string):\n if groupby_func in ("idxmax", "idxmin"):\n pytest.skip("idxmax and idx_min tested in test_idxmin_idxmax_axis1")\n if groupby_func in ("corrwith", "skew"):\n msg = "GH#47723 groupby.corrwith and skew do not correctly implement axis=1"\n request.applymarker(pytest.mark.xfail(reason=msg))\n\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"]\n )\n df["E"] = "x"\n groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]\n gb = df.groupby(groups)\n method = getattr(gb, groupby_func)\n args = get_groupby_method_args(groupby_func, df)\n kwargs = {"axis": 1}\n if numeric_only is not None:\n # when numeric_only is None we don't pass any argument\n kwargs["numeric_only"] = numeric_only\n\n # Functions without numeric_only and axis args\n no_args = ("cumprod", "cumsum", "diff", "fillna", "pct_change", "rank", "shift")\n # Functions with axis args\n has_axis = (\n "cumprod",\n "cumsum",\n "diff",\n "pct_change",\n "rank",\n "shift",\n "cummax",\n "cummin",\n "idxmin",\n "idxmax",\n "fillna",\n )\n warn_msg = f"DataFrameGroupBy.{groupby_func} with axis=1 is deprecated"\n if numeric_only is not None and groupby_func in no_args:\n msg = "got an unexpected keyword argument 'numeric_only'"\n if groupby_func in ["cumprod", "cumsum"]:\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n method(*args, **kwargs)\n else:\n with pytest.raises(TypeError, match=msg):\n method(*args, **kwargs)\n elif groupby_func not in has_axis:\n msg = "got an unexpected keyword argument 'axis'"\n with pytest.raises(TypeError, match=msg):\n method(*args, **kwargs)\n # fillna and shift are successful even on object dtypes\n elif (numeric_only is None or not numeric_only) and groupby_func not in (\n "fillna",\n "shift",\n ):\n msgs = (\n # cummax, cummin, rank\n "not supported between instances of",\n # 
cumprod\n "can't multiply sequence by non-int of type 'float'",\n # cumsum, diff, pct_change\n "unsupported operand type",\n "has no kernel",\n "operation 'sub' not supported for dtype 'str' with dtype 'float64'",\n )\n if using_infer_string:\n pa = pytest.importorskip("pyarrow")\n\n errs = (TypeError, pa.lib.ArrowNotImplementedError)\n else:\n errs = TypeError\n with pytest.raises(errs, match=f"({'|'.join(msgs)})"):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n method(*args, **kwargs)\n else:\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n result = method(*args, **kwargs)\n\n df_expected = df.drop(columns="E").T if numeric_only else df.T\n expected = getattr(df_expected, groupby_func)(*args).T\n if groupby_func == "shift" and not numeric_only:\n # shift with axis=1 leaves the leftmost column as numeric\n # but transposing for expected gives us object dtype\n expected = expected.astype(float)\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "kernel, has_arg",\n [\n ("all", False),\n ("any", False),\n ("bfill", False),\n ("corr", True),\n ("corrwith", True),\n ("cov", True),\n ("cummax", True),\n ("cummin", True),\n ("cumprod", True),\n ("cumsum", True),\n ("diff", False),\n ("ffill", False),\n ("fillna", False),\n ("first", True),\n ("idxmax", True),\n ("idxmin", True),\n ("last", True),\n ("max", True),\n ("mean", True),\n ("median", True),\n ("min", True),\n ("nth", False),\n ("nunique", False),\n ("pct_change", False),\n ("prod", True),\n ("quantile", True),\n ("sem", True),\n ("skew", True),\n ("std", True),\n ("sum", True),\n ("var", True),\n ],\n)\n@pytest.mark.parametrize("numeric_only", [True, False, lib.no_default])\n@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])\ndef test_numeric_only(kernel, has_arg, numeric_only, keys):\n # GH#46072\n # drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False\n # has_arg: Whether the op has a numeric_only arg\n df = 
DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]})\n\n args = get_groupby_method_args(kernel, df)\n kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only}\n\n gb = df.groupby(keys)\n method = getattr(gb, kernel)\n if has_arg and numeric_only is True:\n # Cases where b does not appear in the result\n result = method(*args, **kwargs)\n assert "b" not in result.columns\n elif (\n # kernels that work on any dtype and have numeric_only arg\n kernel in ("first", "last")\n or (\n # kernels that work on any dtype and don't have numeric_only arg\n kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique")\n and numeric_only is lib.no_default\n )\n ):\n warn = FutureWarning if kernel == "fillna" else None\n msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n result = method(*args, **kwargs)\n assert "b" in result.columns\n elif has_arg:\n assert numeric_only is not True\n # kernels that are successful on any dtype were above; this will fail\n\n # object dtypes for transformations are not implemented in Cython and\n # have no Python fallback\n exception = NotImplementedError if kernel.startswith("cum") else TypeError\n\n msg = "|".join(\n [\n "not allowed for this dtype",\n "cannot be performed against 'object' dtypes",\n # On PY39 message is "a number"; on PY310 and after is "a real number"\n "must be a string or a.* number",\n "unsupported operand type",\n "function is not implemented for this dtype",\n re.escape(f"agg function failed [how->{kernel},dtype->object]"),\n ]\n )\n if kernel == "quantile":\n msg = "dtype 'object' does not support operation 'quantile'"\n elif kernel == "idxmin":\n msg = "'<' not supported between instances of 'type' and 'type'"\n elif kernel == "idxmax":\n msg = "'>' not supported between instances of 'type' and 'type'"\n with pytest.raises(exception, match=msg):\n method(*args, **kwargs)\n elif not has_arg and numeric_only is not 
lib.no_default:\n with pytest.raises(\n TypeError, match="got an unexpected keyword argument 'numeric_only'"\n ):\n method(*args, **kwargs)\n else:\n assert kernel in ("diff", "pct_change")\n assert numeric_only is lib.no_default\n # Doesn't have numeric_only argument and fails on nuisance columns\n with pytest.raises(TypeError, match=r"unsupported operand type"):\n method(*args, **kwargs)\n\n\n@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n@pytest.mark.parametrize("dtype", [bool, int, float, object])\ndef test_deprecate_numeric_only_series(dtype, groupby_func, request):\n # GH#46560\n grouper = [0, 0, 1]\n\n ser = Series([1, 0, 0], dtype=dtype)\n gb = ser.groupby(grouper)\n\n if groupby_func == "corrwith":\n # corrwith is not implemented on SeriesGroupBy\n assert not hasattr(gb, groupby_func)\n return\n\n method = getattr(gb, groupby_func)\n\n expected_ser = Series([1, 0, 0])\n expected_gb = expected_ser.groupby(grouper)\n expected_method = getattr(expected_gb, groupby_func)\n\n args = get_groupby_method_args(groupby_func, ser)\n\n fails_on_numeric_object = (\n "corr",\n "cov",\n "cummax",\n "cummin",\n "cumprod",\n "cumsum",\n "quantile",\n )\n # ops that give an object result on object input\n obj_result = (\n "first",\n "last",\n "nth",\n "bfill",\n "ffill",\n "shift",\n "sum",\n "diff",\n "pct_change",\n "var",\n "mean",\n "median",\n "min",\n "max",\n "prod",\n "skew",\n )\n\n # Test default behavior; kernels that fail may be enabled in the future but kernels\n # that succeed should not be allowed to fail (without deprecation, at least)\n if groupby_func in fails_on_numeric_object and dtype is object:\n if groupby_func == "quantile":\n msg = "dtype 'object' does not support operation 'quantile'"\n else:\n msg = "is not supported for object dtype"\n warn = FutureWarning if groupby_func == "fillna" else None\n warn_msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=warn_msg):\n with 
pytest.raises(TypeError, match=msg):\n method(*args)\n elif dtype is object:\n warn = FutureWarning if groupby_func == "fillna" else None\n warn_msg = "SeriesGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=warn_msg):\n result = method(*args)\n with tm.assert_produces_warning(warn, match=warn_msg):\n expected = expected_method(*args)\n if groupby_func in obj_result:\n expected = expected.astype(object)\n tm.assert_series_equal(result, expected)\n\n has_numeric_only = (\n "first",\n "last",\n "max",\n "mean",\n "median",\n "min",\n "prod",\n "quantile",\n "sem",\n "skew",\n "std",\n "sum",\n "var",\n "cummax",\n "cummin",\n "cumprod",\n "cumsum",\n )\n if groupby_func not in has_numeric_only:\n msg = "got an unexpected keyword argument 'numeric_only'"\n with pytest.raises(TypeError, match=msg):\n method(*args, numeric_only=True)\n elif dtype is object:\n msg = "|".join(\n [\n "SeriesGroupBy.sem called with numeric_only=True and dtype object",\n "Series.skew does not allow numeric_only=True with non-numeric",\n "cum(sum|prod|min|max) is not supported for object dtype",\n r"Cannot use numeric_only=True with SeriesGroupBy\..* and non-numeric",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n method(*args, numeric_only=True)\n elif dtype == bool and groupby_func == "quantile":\n msg = "Allowing bool dtype in SeriesGroupBy.quantile"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#51424\n result = method(*args, numeric_only=True)\n expected = method(*args, numeric_only=False)\n tm.assert_series_equal(result, expected)\n else:\n result = method(*args, numeric_only=True)\n expected = method(*args, numeric_only=False)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_numeric_only.py | test_numeric_only.py | Python | 19,188 | 0.95 | 0.103383 | 0.080913 | vue-tools | 554 | 2024-01-22T19:24:44.753976 | BSD-3-Clause | true | 87c9f398713b384340b719db7b138ff1 |
import numpy as np\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\n\ndef test_pipe():\n # Test the pipe method of DataFrameGroupBy.\n # Issue #17871\n\n random_state = np.random.default_rng(2)\n\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": random_state.standard_normal(8),\n "C": random_state.standard_normal(8),\n }\n )\n\n def f(dfgb):\n return dfgb.B.max() - dfgb.C.min().min()\n\n def square(srs):\n return srs**2\n\n # Note that the transformations are\n # GroupBy -> Series\n # Series -> Series\n # This then chains the GroupBy.pipe and the\n # NDFrame.pipe methods\n result = df.groupby("A").pipe(f).pipe(square)\n\n index = Index(["bar", "foo"], name="A")\n expected = pd.Series([3.749306591013693, 6.717707873081384], name="B", index=index)\n\n tm.assert_series_equal(expected, result)\n\n\ndef test_pipe_args():\n # Test passing args to the pipe method of DataFrameGroupBy.\n # Issue #17871\n\n df = DataFrame(\n {\n "group": ["A", "A", "B", "B", "C"],\n "x": [1.0, 2.0, 3.0, 2.0, 5.0],\n "y": [10.0, 100.0, 1000.0, -100.0, -1000.0],\n }\n )\n\n def f(dfgb, arg1):\n filtered = dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)\n return filtered.groupby("group")\n\n def g(dfgb, arg2):\n return dfgb.sum() / dfgb.sum().sum() + arg2\n\n def h(df, arg3):\n return df.x + df.y - arg3\n\n result = df.groupby("group").pipe(f, 0).pipe(g, 10).pipe(h, 100)\n\n # Assert the results here\n index = Index(["A", "B"], name="group")\n expected = pd.Series([-79.5160891089, -78.4839108911], index=index)\n\n tm.assert_series_equal(result, expected)\n\n # test SeriesGroupby.pipe\n ser = pd.Series([1, 1, 2, 2, 3, 3])\n result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())\n\n expected = pd.Series([4, 8, 12], index=Index([1, 2, 3], dtype=np.int64))\n\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_pipe.py | test_pipe.py | 
Python | 2,082 | 0.95 | 0.0875 | 0.189655 | vue-tools | 439 | 2024-12-03T13:22:01.269130 | MIT | true | c2f27b24082937f2240a3ef131a291cf |
# Only tests that raise an error and have no better location should go here.\n# Tests for specific groupby methods should go in their respective\n# test file.\n\nimport datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n DataFrame,\n Grouper,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.groupby import get_groupby_method_args\n\n\n@pytest.fixture(\n params=[\n "a",\n ["a"],\n ["a", "b"],\n Grouper(key="a"),\n lambda x: x % 2,\n [0, 0, 0, 1, 2, 2, 2, 3, 3],\n np.array([0, 0, 0, 1, 2, 2, 2, 3, 3]),\n dict(zip(range(9), [0, 0, 0, 1, 2, 2, 2, 3, 3])),\n Series([1, 1, 1, 1, 1, 2, 2, 2, 2]),\n [Series([1, 1, 1, 1, 1, 2, 2, 2, 2]), Series([3, 3, 4, 4, 4, 4, 4, 3, 3])],\n ]\n)\ndef by(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef groupby_series(request):\n return request.param\n\n\n@pytest.fixture\ndef df_with_string_col():\n df = DataFrame(\n {\n "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],\n "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],\n "c": range(9),\n "d": list("xyzwtyuio"),\n }\n )\n return df\n\n\n@pytest.fixture\ndef df_with_datetime_col():\n df = DataFrame(\n {\n "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],\n "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],\n "c": range(9),\n "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),\n }\n )\n return df\n\n\n@pytest.fixture\ndef df_with_timedelta_col():\n df = DataFrame(\n {\n "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],\n "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],\n "c": range(9),\n "d": datetime.timedelta(days=1),\n }\n )\n return df\n\n\n@pytest.fixture\ndef df_with_cat_col():\n df = DataFrame(\n {\n "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],\n "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],\n "c": range(9),\n "d": Categorical(\n ["a", "a", "a", "a", "b", "b", "b", "b", "c"],\n categories=["a", "b", "c", "d"],\n ordered=True,\n ),\n }\n )\n return df\n\n\ndef _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=""):\n warn_klass = None if warn_msg == "" else FutureWarning\n with 
tm.assert_produces_warning(warn_klass, match=warn_msg):
        if klass is None:
            if how == "method":
                getattr(gb, groupby_func)(*args)
            elif how == "agg":
                gb.agg(groupby_func, *args)
            else:
                gb.transform(groupby_func, *args)
        else:
            with pytest.raises(klass, match=msg):
                if how == "method":
                    getattr(gb, groupby_func)(*args)
                elif how == "agg":
                    gb.agg(groupby_func, *args)
                else:
                    gb.transform(groupby_func, *args)


# NOTE(review): `by`, `groupby_series`, `groupby_func`, `using_infer_string`,
# `observed`, `using_copy_on_write` and the `df_with_*_col` frames are pytest
# fixtures defined outside this chunk (presumably in this file's top half /
# conftest) — confirm there when editing expectations below.
@pytest.mark.parametrize("how", ["method", "agg", "transform"])
def test_groupby_raises_string(
    how, by, groupby_series, groupby_func, df_with_string_col, using_infer_string
):
    # For every groupby method applied to a string ("d") column, assert either
    # success (klass is None) or the exact exception class + message pattern.
    df = df_with_string_col
    args = get_groupby_method_args(groupby_func, df)
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    if groupby_func == "corrwith":
        # corrwith does not exist on SeriesGroupBy/this grouping; nothing to check.
        assert not hasattr(gb, "corrwith")
        return

    # Mapping: groupby method name -> (expected exception class(es), message regex).
    # (None, "") means the call is expected to succeed.
    klass, msg = {
        "all": (None, ""),
        "any": (None, ""),
        "bfill": (None, ""),
        "corrwith": (TypeError, "Could not convert"),
        "count": (None, ""),
        "cumcount": (None, ""),
        "cummax": (
            (NotImplementedError, TypeError),
            "(function|cummax) is not (implemented|supported) for (this|object) dtype",
        ),
        "cummin": (
            (NotImplementedError, TypeError),
            "(function|cummin) is not (implemented|supported) for (this|object) dtype",
        ),
        "cumprod": (
            (NotImplementedError, TypeError),
            "(function|cumprod) is not (implemented|supported) for (this|object) dtype",
        ),
        "cumsum": (
            (NotImplementedError, TypeError),
            "(function|cumsum) is not (implemented|supported) for (this|object) dtype",
        ),
        "diff": (TypeError, "unsupported operand type"),
        "ffill": (None, ""),
        "fillna": (None, ""),
        "first": (None, ""),
        "idxmax": (None, ""),
        "idxmin": (None, ""),
        "last": (None, ""),
        "max": (None, ""),
        "mean": (
            TypeError,
            re.escape("agg function failed [how->mean,dtype->object]"),
        ),
        "median": (
            TypeError,
            re.escape("agg function failed [how->median,dtype->object]"),
        ),
        "min": (None, ""),
        "ngroup": (None, ""),
        "nunique": (None, ""),
        "pct_change": (TypeError, "unsupported operand type"),
        "prod": (
            TypeError,
            re.escape("agg function failed [how->prod,dtype->object]"),
        ),
        "quantile": (TypeError, "dtype 'object' does not support operation 'quantile'"),
        "rank": (None, ""),
        "sem": (ValueError, "could not convert string to float"),
        "shift": (None, ""),
        "size": (None, ""),
        "skew": (ValueError, "could not convert string to float"),
        "std": (ValueError, "could not convert string to float"),
        "sum": (None, ""),
        "var": (
            TypeError,
            re.escape("agg function failed [how->var,dtype->"),
        ),
    }[groupby_func]

    if using_infer_string:
        # Under the string dtype the error class/message differ from object dtype;
        # override the expectations set up above.
        if groupby_func in [
            "prod",
            "mean",
            "median",
            "cumsum",
            "cumprod",
            "std",
            "sem",
            "var",
            "skew",
            "quantile",
        ]:
            msg = f"dtype 'str' does not support operation '{groupby_func}'"
            if groupby_func in ["sem", "std", "skew"]:
                # The object-dtype raises ValueError when trying to convert to numeric.
                klass = TypeError
        elif groupby_func == "pct_change" and df["d"].dtype.storage == "pyarrow":
            # This doesn't go through EA._groupby_op so the message isn't controlled
            # there.
            msg = "operation 'truediv' not supported for dtype 'str' with dtype 'str'"
        elif groupby_func == "diff" and df["d"].dtype.storage == "pyarrow":
            # This doesn't go through EA._groupby_op so the message isn't controlled
            # there.
            msg = "operation 'sub' not supported for dtype 'str' with dtype 'str'"

        elif groupby_func in ["cummin", "cummax"]:
            msg = msg.replace("object", "str")
        elif groupby_func == "corrwith":
            msg = "Cannot perform reduction 'mean' with string dtype"

    if groupby_func == "fillna":
        # fillna on a GroupBy is deprecated and emits a warning alongside the call.
        kind = "Series" if groupby_series else "DataFrame"
        warn_msg = f"{kind}GroupBy.fillna is deprecated"
    else:
        warn_msg = ""
    _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)


@pytest.mark.parametrize("how", ["agg", "transform"])
def test_groupby_raises_string_udf(how, by, groupby_series, df_with_string_col):
    # A user-defined function that raises must propagate its exception unchanged.
    df = df_with_string_col
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    def func(x):
        raise TypeError("Test error message")

    with pytest.raises(TypeError, match="Test error message"):
        getattr(gb, how)(func)


@pytest.mark.parametrize("how", ["agg", "transform"])
@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
def test_groupby_raises_string_np(
    how,
    by,
    groupby_series,
    groupby_func_np,
    df_with_string_col,
    using_infer_string,
):
    # GH#50749 — passing raw numpy reductions (np.sum / np.mean) to agg/transform
    # on a string column: np.sum works, np.mean raises.
    df = df_with_string_col
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    klass, msg = {
        np.sum: (None, ""),
        np.mean: (
            TypeError,
            "agg function failed|Cannot perform reduction 'mean' with string dtype",
        ),
    }[groupby_func_np]

    if using_infer_string:
        if groupby_func_np is np.mean:
            klass = TypeError
            msg = "dtype 'str' does not support operation 'mean'"

    # Passing the numpy callable itself is deprecated in favor of the method name.
    if groupby_series:
        warn_msg = "using SeriesGroupBy.[sum|mean]"
    else:
        warn_msg = "using DataFrameGroupBy.[sum|mean]"
    _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)


@pytest.mark.parametrize("how", ["method", "agg", "transform"])
def test_groupby_raises_datetime(
    how, by, groupby_series, groupby_func, df_with_datetime_col
):
    # Same matrix as test_groupby_raises_string, but for a datetime64 "d" column.
    df = df_with_datetime_col
    args = get_groupby_method_args(groupby_func, df)
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    if groupby_func == "corrwith":
        assert not hasattr(gb, "corrwith")
        return

    # Mapping: method -> (expected exception class, message regex); (None, "") = ok.
    klass, msg = {
        "all": (None, ""),
        "any": (None, ""),
        "bfill": (None, ""),
        "corrwith": (TypeError, "cannot perform __mul__ with this index type"),
        "count": (None, ""),
        "cumcount": (None, ""),
        "cummax": (None, ""),
        "cummin": (None, ""),
        "cumprod": (TypeError, "datetime64 type does not support cumprod operations"),
        "cumsum": (TypeError, "datetime64 type does not support cumsum operations"),
        "diff": (None, ""),
        "ffill": (None, ""),
        "fillna": (None, ""),
        "first": (None, ""),
        "idxmax": (None, ""),
        "idxmin": (None, ""),
        "last": (None, ""),
        "max": (None, ""),
        "mean": (None, ""),
        "median": (None, ""),
        "min": (None, ""),
        "ngroup": (None, ""),
        "nunique": (None, ""),
        "pct_change": (TypeError, "cannot perform __truediv__ with this index type"),
        "prod": (TypeError, "datetime64 type does not support prod"),
        "quantile": (None, ""),
        "rank": (None, ""),
        "sem": (None, ""),
        "shift": (None, ""),
        "size": (None, ""),
        "skew": (
            TypeError,
            "|".join(
                [
                    r"dtype datetime64\[ns\] does not support reduction",
                    "datetime64 type does not support skew operations",
                ]
            ),
        ),
        "std": (None, ""),
        "sum": (TypeError, "datetime64 type does not support sum operations"),
        "var": (TypeError, "datetime64 type does not support var operations"),
    }[groupby_func]

    if groupby_func in ["any", "all"]:
        warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated"
    elif groupby_func == "fillna":
        kind = "Series" if groupby_series else "DataFrame"
        warn_msg = f"{kind}GroupBy.fillna is deprecated"
    else:
        warn_msg = ""
    _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=warn_msg)


@pytest.mark.parametrize("how", ["agg", "transform"])
def test_groupby_raises_datetime_udf(how, by, groupby_series, df_with_datetime_col):
    # UDF exceptions propagate unchanged for datetime columns too.
    df = df_with_datetime_col
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    def func(x):
        raise TypeError("Test error message")

    with pytest.raises(TypeError, match="Test error message"):
        getattr(gb, how)(func)


@pytest.mark.parametrize("how", ["agg", "transform"])
@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
def test_groupby_raises_datetime_np(
    how, by, groupby_series, groupby_func_np, df_with_datetime_col
):
    # GH#50749 — raw numpy reductions on datetime64: np.sum raises, np.mean works.
    df = df_with_datetime_col
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    klass, msg = {
        np.sum: (TypeError, "datetime64 type does not support sum operations"),
        np.mean: (None, ""),
    }[groupby_func_np]

    if groupby_series:
        warn_msg = "using SeriesGroupBy.[sum|mean]"
    else:
        warn_msg = "using DataFrameGroupBy.[sum|mean]"
    _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)


@pytest.mark.parametrize("func", ["prod", "cumprod", "skew", "var"])
def test_groupby_raises_timedelta(func, df_with_timedelta_col):
    # These reductions are undefined for timedelta64 and must raise TypeError.
    df = df_with_timedelta_col
    gb = df.groupby(by="a")

    _call_and_check(
        TypeError,
        "timedelta64 type does not support .* operations",
        "method",
        gb,
        func,
        [],
    )


@pytest.mark.parametrize("how", ["method", "agg", "transform"])
def test_groupby_raises_category(
    how, by, groupby_series, groupby_func, using_copy_on_write, df_with_cat_col
):
    # GH#50749 — same matrix for a Categorical "d" column.
    df = df_with_cat_col
    args = get_groupby_method_args(groupby_func, df)
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    if groupby_func == "corrwith":
        assert not hasattr(gb, "corrwith")
        return

    # Mapping: method -> (expected exception class(es), message regex).
    # Several entries accept alternative messages because the text differs
    # across code paths / pandas versions.
    klass, msg = {
        "all": (None, ""),
        "any": (None, ""),
        "bfill": (None, ""),
        "corrwith": (
            TypeError,
            r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
        ),
        "count": (None, ""),
        "cumcount": (None, ""),
        "cummax": (
            (NotImplementedError, TypeError),
            "(category type does not support cummax operations|"
            "category dtype not supported|"
            "cummax is not supported for category dtype)",
        ),
        "cummin": (
            (NotImplementedError, TypeError),
            "(category type does not support cummin operations|"
            "category dtype not supported|"
            "cummin is not supported for category dtype)",
        ),
        "cumprod": (
            (NotImplementedError, TypeError),
            "(category type does not support cumprod operations|"
            "category dtype not supported|"
            "cumprod is not supported for category dtype)",
        ),
        "cumsum": (
            (NotImplementedError, TypeError),
            "(category type does not support cumsum operations|"
            "category dtype not supported|"
            "cumsum is not supported for category dtype)",
        ),
        "diff": (
            TypeError,
            r"unsupported operand type\(s\) for -: 'Categorical' and 'Categorical'",
        ),
        "ffill": (None, ""),
        "fillna": (
            TypeError,
            r"Cannot setitem on a Categorical with a new category \(0\), "
            "set the categories first",
        )
        if not using_copy_on_write
        else (None, ""),  # no-op with CoW
        "first": (None, ""),
        "idxmax": (None, ""),
        "idxmin": (None, ""),
        "last": (None, ""),
        "max": (None, ""),
        "mean": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'mean'",
                    "category dtype does not support aggregation 'mean'",
                ]
            ),
        ),
        "median": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'median'",
                    "category dtype does not support aggregation 'median'",
                ]
            ),
        ),
        "min": (None, ""),
        "ngroup": (None, ""),
        "nunique": (None, ""),
        "pct_change": (
            TypeError,
            r"unsupported operand type\(s\) for /: 'Categorical' and 'Categorical'",
        ),
        "prod": (TypeError, "category type does not support prod operations"),
        "quantile": (TypeError, "No matching signature found"),
        "rank": (None, ""),
        "sem": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'sem'",
                    "category dtype does not support aggregation 'sem'",
                ]
            ),
        ),
        "shift": (None, ""),
        "size": (None, ""),
        "skew": (
            TypeError,
            "|".join(
                [
                    "dtype category does not support reduction 'skew'",
                    "category type does not support skew operations",
                ]
            ),
        ),
        "std": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'std'",
                    "category dtype does not support aggregation 'std'",
                ]
            ),
        ),
        "sum": (TypeError, "category type does not support sum operations"),
        "var": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'var'",
                    "category dtype does not support aggregation 'var'",
                ]
            ),
        ),
    }[groupby_func]

    if groupby_func == "fillna":
        kind = "Series" if groupby_series else "DataFrame"
        warn_msg = f"{kind}GroupBy.fillna is deprecated"
    else:
        warn_msg = ""
    _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)


@pytest.mark.parametrize("how", ["agg", "transform"])
def test_groupby_raises_category_udf(how, by, groupby_series, df_with_cat_col):
    # GH#50749 — UDF exceptions propagate unchanged for categorical columns.
    df = df_with_cat_col
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    def func(x):
        raise TypeError("Test error message")

    with pytest.raises(TypeError, match="Test error message"):
        getattr(gb, how)(func)


@pytest.mark.parametrize("how", ["agg", "transform"])
@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
def test_groupby_raises_category_np(
    how, by, groupby_series, groupby_func_np, df_with_cat_col
):
    # GH#50749 — raw numpy reductions on Categorical: both sum and mean raise.
    df = df_with_cat_col
    gb = df.groupby(by=by)

    if groupby_series:
        gb = gb["d"]

    klass, msg = {
        np.sum: (TypeError, "category type does not support sum operations"),
        np.mean: (
            TypeError,
            "category dtype does not support aggregation 'mean'",
        ),
    }[groupby_func_np]

    if groupby_series:
        warn_msg = "using SeriesGroupBy.[sum|mean]"
    else:
        warn_msg = "using DataFrameGroupBy.[sum|mean]"
    _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)


@pytest.mark.parametrize("how", ["method", "agg", "transform"])
def test_groupby_raises_category_on_category(
    how,
    by,
    groupby_series,
    groupby_func,
    observed,
    using_copy_on_write,
    df_with_cat_col,
):
    # GH#50749 — grouping BY a categorical (with an unobserved category "d" in
    # the grouper) while aggregating a categorical column.
    df = df_with_cat_col
    df["a"] = Categorical(
        ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
        categories=["a", "b", "c", "d"],
        ordered=True,
    )
    args = get_groupby_method_args(groupby_func, df)
    gb = df.groupby(by=by, observed=observed)

    if groupby_series:
        gb = gb["d"]

    if groupby_func == "corrwith":
        assert not hasattr(gb, "corrwith")
        return

    # Whether unobserved categories produce empty groups; drives idxmin/idxmax
    # expectations below.
    empty_groups = not observed and any(group.empty for group in gb.groups.values())
    if (
        not observed
        and how != "transform"
        and isinstance(by, list)
        and isinstance(by[0], str)
        and by == ["a", "b"]
    ):
        assert not empty_groups
        # TODO: empty_groups should be true due to unobserved categorical combinations
        empty_groups = True
    if how == "transform":
        # empty groups will be ignored
        empty_groups = False

    klass, msg = {
        "all": (None, ""),
        "any": (None, ""),
        "bfill": (None, ""),
        "corrwith": (
            TypeError,
            r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
        ),
        "count": (None, ""),
        "cumcount": (None, ""),
        "cummax": (
            (NotImplementedError, TypeError),
            "(cummax is not supported for category dtype|"
            "category dtype not supported|"
            "category type does not support cummax operations)",
        ),
        "cummin": (
            (NotImplementedError, TypeError),
            "(cummin is not supported for category dtype|"
            "category dtype not supported|"
            "category type does not support cummin operations)",
        ),
        "cumprod": (
            (NotImplementedError, TypeError),
            "(cumprod is not supported for category dtype|"
            "category dtype not supported|"
            "category type does not support cumprod operations)",
        ),
        "cumsum": (
            (NotImplementedError, TypeError),
            "(cumsum is not supported for category dtype|"
            "category dtype not supported|"
            "category type does not support cumsum operations)",
        ),
        "diff": (TypeError, "unsupported operand type"),
        "ffill": (None, ""),
        "fillna": (
            TypeError,
            r"Cannot setitem on a Categorical with a new category \(0\), "
            "set the categories first",
        )
        if not using_copy_on_write
        else (None, ""),  # no-op with CoW
        "first": (None, ""),
        "idxmax": (ValueError, "empty group due to unobserved categories")
        if empty_groups
        else (None, ""),
        "idxmin": (ValueError, "empty group due to unobserved categories")
        if empty_groups
        else (None, ""),
        "last": (None, ""),
        "max": (None, ""),
        "mean": (TypeError, "category dtype does not support aggregation 'mean'"),
        "median": (TypeError, "category dtype does not support aggregation 'median'"),
        "min": (None, ""),
        "ngroup": (None, ""),
        "nunique": (None, ""),
        "pct_change": (TypeError, "unsupported operand type"),
        "prod": (TypeError, "category type does not support prod operations"),
        "quantile": (TypeError, "No matching signature found"),
        "rank": (None, ""),
        "sem": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'sem'",
                    "category dtype does not support aggregation 'sem'",
                ]
            ),
        ),
        "shift": (None, ""),
        "size": (None, ""),
        "skew": (
            TypeError,
            "|".join(
                [
                    "category type does not support skew operations",
                    "dtype category does not support reduction 'skew'",
                ]
            ),
        ),
        "std": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'std'",
                    "category dtype does not support aggregation 'std'",
                ]
            ),
        ),
        "sum": (TypeError, "category type does not support sum operations"),
        "var": (
            TypeError,
            "|".join(
                [
                    "'Categorical' .* does not support reduction 'var'",
                    "category dtype does not support aggregation 'var'",
                ]
            ),
        ),
    }[groupby_func]

    if groupby_func == "fillna":
        kind = "Series" if groupby_series else "DataFrame"
        warn_msg = f"{kind}GroupBy.fillna is deprecated"
    else:
        warn_msg = ""
    _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)


def test_subsetting_columns_axis_1_raises():
    # GH 35443
    df = DataFrame({"a": [1], "b": [2], "c": [3]})
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby("a", axis=1)
    with pytest.raises(ValueError, match="Cannot subset columns when using axis=1"):
        gb["b"]
# Tests for groupby reduction methods (any/all, idxmin/idxmax, min/max,
# mean/median, first/last, nunique) across dtypes (object, masked, datetime,
# categorical, nullable).  The final test in this chunk is truncated by the
# chunk boundary and continues past the visible source.
import builtins
import datetime as dt
from string import ascii_lowercase

import numpy as np
import pytest

from pandas._libs.tslibs import iNaT

from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.missing import na_value_for_dtype

import pandas as pd
from pandas import (
    DataFrame,
    MultiIndex,
    Series,
    Timestamp,
    date_range,
    isna,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td


# NOTE(review): `skipna`, `sort`, `observed`, `frame_or_series`,
# `any_real_numpy_dtype`, etc. are pytest fixtures from pandas' conftest.
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize(
    "vals",
    [
        ["foo", "bar", "baz"],
        ["foo", "", ""],
        ["", "", ""],
        [1, 2, 3],
        [1, 0, 0],
        [0, 0, 0],
        [1.0, 2.0, 3.0],
        [1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [True, True, True],
        [True, False, False],
        [False, False, False],
        [np.nan, np.nan, np.nan],
    ],
)
def test_groupby_bool_aggs(skipna, agg_func, vals):
    # groupby any/all must match Python's builtin any/all on the raw values.
    df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})

    # Figure out expectation using Python builtin
    exp = getattr(builtins, agg_func)(vals)

    # edge case for missing data with skipna and 'any'
    if skipna and all(isna(vals)) and agg_func == "any":
        exp = False

    expected = DataFrame(
        [exp] * 2, columns=["val"], index=pd.Index(["a", "b"], name="key")
    )
    result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
    tm.assert_frame_equal(result, expected)


def test_any():
    # NaN counts as falsy for any(); column C (all strings) is truthy.
    df = DataFrame(
        [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
        columns=["A", "B", "C"],
    )
    expected = DataFrame(
        [[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
    )
    expected.index.name = "A"
    result = df.groupby("A").any()
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
def test_bool_aggs_dup_column_labels(bool_agg_func):
    # GH#21668
    df = DataFrame([[True, True]], columns=["a", "a"])
    grp_by = df.groupby([0])
    result = getattr(grp_by, bool_agg_func)()

    expected = df.set_axis(np.array([0]))
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
@pytest.mark.parametrize(
    "data",
    [
        [False, False, False],
        [True, True, True],
        [pd.NA, pd.NA, pd.NA],
        [False, pd.NA, False],
        [True, pd.NA, True],
        [True, pd.NA, False],
    ],
)
def test_masked_kleene_logic(bool_agg_func, skipna, data):
    # GH#37506
    ser = Series(data, dtype="boolean")

    # The result should match aggregating on the whole series. Correctness
    # there is verified in test_reductions.py::test_any_all_boolean_kleene_logic
    expected_data = getattr(ser, bool_agg_func)(skipna=skipna)
    expected = Series(expected_data, index=np.array([0]), dtype="boolean")

    result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "dtype1,dtype2,exp_col1,exp_col2",
    [
        (
            "float",
            "Float64",
            np.array([True], dtype=bool),
            pd.array([pd.NA], dtype="boolean"),
        ),
        (
            "Int64",
            "float",
            pd.array([pd.NA], dtype="boolean"),
            np.array([True], dtype=bool),
        ),
        (
            "Int64",
            "Int64",
            pd.array([pd.NA], dtype="boolean"),
            pd.array([pd.NA], dtype="boolean"),
        ),
        (
            "Float64",
            "boolean",
            pd.array([pd.NA], dtype="boolean"),
            pd.array([pd.NA], dtype="boolean"),
        ),
    ],
)
def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2):
    # GH#37506 — masked dtypes keep NA with skipna=False; numpy dtypes give bool.
    data = [1.0, np.nan]
    df = DataFrame(
        {"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)}
    )
    result = df.groupby([1, 1]).agg("all", skipna=False)

    expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=np.array([1]))
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
def test_masked_bool_aggs_skipna(bool_agg_func, dtype, skipna, frame_or_series):
    # GH#40585
    obj = frame_or_series([pd.NA, 1], dtype=dtype)
    expected_res = True
    if not skipna and bool_agg_func == "all":
        expected_res = pd.NA
    expected = frame_or_series([expected_res], index=np.array([1]), dtype="boolean")

    result = obj.groupby([1, 1]).agg(bool_agg_func, skipna=skipna)
    tm.assert_equal(result, expected)


@pytest.mark.parametrize(
    "bool_agg_func,data,expected_res",
    [
        ("any", [pd.NA, np.nan], False),
        ("any", [pd.NA, 1, np.nan], True),
        ("all", [pd.NA, pd.NaT], True),
        ("all", [pd.NA, False, pd.NaT], False),
    ],
)
def test_object_type_missing_vals(bool_agg_func, data, expected_res, frame_or_series):
    # GH#37501
    obj = frame_or_series(data, dtype=object)
    result = obj.groupby([1] * len(data)).agg(bool_agg_func)
    expected = frame_or_series([expected_res], index=np.array([1]), dtype="bool")
    tm.assert_equal(result, expected)


@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
def test_object_NA_raises_with_skipna_false(bool_agg_func):
    # GH#37501
    ser = Series([pd.NA], dtype=object)
    with pytest.raises(TypeError, match="boolean value of NA is ambiguous"):
        ser.groupby([1]).agg(bool_agg_func, skipna=False)


@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
def test_empty(frame_or_series, bool_agg_func):
    # GH 45231 — any/all on an empty object grouped by its own index returns bool.
    kwargs = {"columns": ["a"]} if frame_or_series is DataFrame else {"name": "a"}
    obj = frame_or_series(**kwargs, dtype=object)
    result = getattr(obj.groupby(obj.index), bool_agg_func)()
    expected = frame_or_series(**kwargs, dtype=bool)
    tm.assert_equal(result, expected)


@pytest.mark.parametrize("how", ["idxmin", "idxmax"])
def test_idxmin_idxmax_extremes(how, any_real_numpy_dtype):
    # GH#57040 — idxmin/idxmax at the dtype's min/max representable values.
    if any_real_numpy_dtype is int or any_real_numpy_dtype is float:
        # No need to test
        return
    info = np.iinfo if "int" in any_real_numpy_dtype else np.finfo
    min_value = info(any_real_numpy_dtype).min
    max_value = info(any_real_numpy_dtype).max
    df = DataFrame(
        {"a": [2, 1, 1, 2], "b": [min_value, max_value, max_value, min_value]},
        dtype=any_real_numpy_dtype,
    )
    gb = df.groupby("a")
    result = getattr(gb, how)()
    expected = DataFrame(
        {"b": [1, 0]}, index=pd.Index([1, 2], name="a", dtype=any_real_numpy_dtype)
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("how", ["idxmin", "idxmax"])
def test_idxmin_idxmax_extremes_skipna(skipna, how, float_numpy_dtype):
    # GH#57040 — skipna=False with all-NA groups is deprecated (FutureWarning)
    # and yields NaN for every group.
    min_value = np.finfo(float_numpy_dtype).min
    max_value = np.finfo(float_numpy_dtype).max
    df = DataFrame(
        {
            "a": Series(np.repeat(range(1, 6), repeats=2), dtype="intp"),
            "b": Series(
                [
                    np.nan,
                    min_value,
                    np.nan,
                    max_value,
                    min_value,
                    np.nan,
                    max_value,
                    np.nan,
                    np.nan,
                    np.nan,
                ],
                dtype=float_numpy_dtype,
            ),
        },
    )
    gb = df.groupby("a")

    warn = None if skipna else FutureWarning
    msg = f"The behavior of DataFrameGroupBy.{how} with all-NA values"
    with tm.assert_produces_warning(warn, match=msg):
        result = getattr(gb, how)(skipna=skipna)
    if skipna:
        values = [1, 3, 4, 6, np.nan]
    else:
        values = np.nan
    expected = DataFrame(
        {"b": values}, index=pd.Index(range(1, 6), name="a", dtype="intp")
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "func, values",
    [
        ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
        ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
    ],
)
@pytest.mark.parametrize("numeric_only", [True, False])
def test_idxmin_idxmax_returns_int_types(func, values, numeric_only):
    # GH 25444
    df = DataFrame(
        {
            "name": ["A", "A", "B", "B"],
            "c_int": [1, 2, 3, 4],
            "c_float": [4.02, 3.03, 2.04, 1.05],
            "c_date": ["2019", "2018", "2016", "2017"],
        }
    )
    df["c_date"] = pd.to_datetime(df["c_date"])
    df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific")
    df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0]
    df["c_period"] = df["c_date"].dt.to_period("W")
    df["c_Integer"] = df["c_int"].astype("Int64")
    df["c_Floating"] = df["c_float"].astype("Float64")

    result = getattr(df.groupby("name"), func)(numeric_only=numeric_only)

    expected = DataFrame(values, index=pd.Index(["A", "B"], name="name"))
    if numeric_only:
        expected = expected.drop(columns=["c_date"])
    else:
        # the datetime-derived columns share positions with c_date; the
        # nullable columns share positions with their numpy counterparts.
        expected["c_date_tz"] = expected["c_date"]
        expected["c_timedelta"] = expected["c_date"]
        expected["c_period"] = expected["c_date"]
        expected["c_Integer"] = expected["c_int"]
        expected["c_Floating"] = expected["c_float"]

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "data",
    [
        (
            Timestamp("2011-01-15 12:50:28.502376"),
            Timestamp("2011-01-20 12:50:28.593448"),
        ),
        (24650000000000001, 24650000000000002),
    ],
)
@pytest.mark.parametrize("method", ["count", "min", "max", "first", "last"])
def test_groupby_non_arithmetic_agg_int_like_precision(method, data):
    # GH#6620, GH#9311 — no precision loss for int-like values near 2**54.
    df = DataFrame({"a": [1, 1], "b": data})

    grouped = df.groupby("a")
    result = getattr(grouped, method)()
    if method == "count":
        expected_value = 2
    elif method == "first":
        expected_value = data[0]
    elif method == "last":
        expected_value = data[1]
    else:
        expected_value = getattr(df["b"], method)()
    expected = DataFrame({"b": [expected_value]}, index=pd.Index([1], name="a"))

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("how", ["first", "last"])
def test_first_last_skipna(any_real_nullable_dtype, sort, skipna, how):
    # GH#57019
    na_value = na_value_for_dtype(pandas_dtype(any_real_nullable_dtype))
    df = DataFrame(
        {
            "a": [2, 1, 1, 2, 3, 3],
            "b": [na_value, 3.0, na_value, 4.0, np.nan, np.nan],
            "c": [na_value, 3.0, na_value, 4.0, np.nan, np.nan],
        },
        dtype=any_real_nullable_dtype,
    )
    gb = df.groupby("a", sort=sort)
    method = getattr(gb, how)
    result = method(skipna=skipna)

    # Positional rows expected per (how, skipna) combination.
    ilocs = {
        ("first", True): [3, 1, 4],
        ("first", False): [0, 1, 4],
        ("last", True): [3, 1, 5],
        ("last", False): [3, 2, 5],
    }[how, skipna]
    expected = df.iloc[ilocs].set_index("a")
    if sort:
        expected = expected.sort_index()
    tm.assert_frame_equal(result, expected)


def test_idxmin_idxmax_axis1():
    # idxmax(axis=1) is deprecated; verify result against DataFrame.idxmax(axis=1)
    # and that mixed dtypes (Timestamp vs float) raise.
    df = DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"]
    )
    df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]

    gb = df.groupby("A")

    warn_msg = "DataFrameGroupBy.idxmax with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=warn_msg):
        res = gb.idxmax(axis=1)

    alt = df.iloc[:, 1:].idxmax(axis=1)
    indexer = res.index.get_level_values(1)

    tm.assert_series_equal(alt[indexer], res.droplevel("A"))

    df["E"] = date_range("2016-01-01", periods=10)
    gb2 = df.groupby("A")

    msg = "'>' not supported between instances of 'Timestamp' and 'float'"
    with pytest.raises(TypeError, match=msg):
        with tm.assert_produces_warning(FutureWarning, match=warn_msg):
            gb2.idxmax(axis=1)


def test_groupby_mean_no_overflow():
    # Regression test for (#22487)
    df = DataFrame(
        {
            "user": ["A", "A", "A", "A", "A"],
            "connections": [4970, 4749, 4719, 4704, 18446744073699999744],
        }
    )
    assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840


def test_mean_on_timedelta():
    # GH 17382
    df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5})
    result = df.groupby("cat")["time"].mean()
    expected = Series(
        pd.to_timedelta([4, 5]), name="time", index=pd.Index(["A", "B"], name="cat")
    )
    tm.assert_series_equal(result, expected)


def test_cython_median():
    # Cython median must agree with agg(np.nanmedian) / agg(np.median),
    # including NaN values and NaN group labels.
    arr = np.random.default_rng(2).standard_normal(1000)
    arr[::2] = np.nan
    df = DataFrame(arr)

    labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)
    labels[::17] = np.nan

    result = df.groupby(labels).median()
    msg = "using DataFrameGroupBy.median"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        exp = df.groupby(labels).agg(np.nanmedian)
    tm.assert_frame_equal(result, exp)

    df = DataFrame(np.random.default_rng(2).standard_normal((1000, 5)))
    msg = "using DataFrameGroupBy.median"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        rs = df.groupby(labels).agg(np.median)
    xp = df.groupby(labels).median()
    tm.assert_frame_equal(rs, xp)


def test_median_empty_bins(observed):
    # median over pd.cut bins (some possibly empty) matches a lambda median agg.
    df = DataFrame(np.random.default_rng(2).integers(0, 44, 500))

    grps = range(0, 55, 5)
    bins = pd.cut(df[0], grps)

    result = df.groupby(bins, observed=observed).median()
    expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
    tm.assert_frame_equal(result, expected)


def test_max_min_non_numeric():
    # #2700
    aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})

    result = aa.groupby("nn").max()
    assert "ss" in result

    result = aa.groupby("nn").max(numeric_only=False)
    assert "ss" in result

    result = aa.groupby("nn").min()
    assert "ss" in result

    result = aa.groupby("nn").min(numeric_only=False)
    assert "ss" in result


def test_max_min_object_multiple_columns(using_array_manager, using_infer_string):
    # GH#41111 case where the aggregation is valid for some columns but not
    # others; we split object blocks column-wise, consistent with
    # DataFrame._reduce

    df = DataFrame(
        {
            "A": [1, 1, 2, 2, 3],
            "B": [1, "foo", 2, "bar", False],
            "C": ["a", "b", "c", "d", "e"],
        }
    )
    df._consolidate_inplace()  # should already be consolidate, but double-check
    if not using_array_manager:
        assert len(df._mgr.blocks) == 3 if using_infer_string else 2

    gb = df.groupby("A")

    result = gb[["C"]].max()
    # "max" is valid for column "C" but not for "B"
    ei = pd.Index([1, 2, 3], name="A")
    expected = DataFrame({"C": ["b", "d", "e"]}, index=ei)
    tm.assert_frame_equal(result, expected)

    result = gb[["C"]].min()
    # "min" is valid for column "C" but not for "B"
    ei = pd.Index([1, 2, 3], name="A")
    expected = DataFrame({"C": ["a", "c", "e"]}, index=ei)
    tm.assert_frame_equal(result, expected)


def test_min_date_with_nans():
    # GH26321
    dates = pd.to_datetime(
        Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
    ).dt.date
    df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})

    result = df.groupby("b", as_index=False)["c"].min()["c"]
    expected = pd.to_datetime(
        Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
    ).dt.date
    tm.assert_series_equal(result, expected)

    result = df.groupby("b")["c"].min()
    expected.index.name = "b"
    tm.assert_series_equal(result, expected)


def test_max_inat():
    # GH#40767 dont interpret iNaT as NaN
    ser = Series([1, iNaT])
    key = np.array([1, 1], dtype=np.int64)
    gb = ser.groupby(key)

    result = gb.max(min_count=2)
    expected = Series({1: 1}, dtype=np.int64)
    tm.assert_series_equal(result, expected, check_exact=True)

    result = gb.min(min_count=2)
    expected = Series({1: iNaT}, dtype=np.int64)
    tm.assert_series_equal(result, expected, check_exact=True)

    # not enough entries -> gets masked to NaN
    result = gb.min(min_count=3)
    expected = Series({1: np.nan})
    tm.assert_series_equal(result, expected, check_exact=True)


def test_max_inat_not_all_na():
    # GH#40767 dont interpret iNaT as NaN

    # make sure we dont round iNaT+1 to iNaT
    ser = Series([1, iNaT, 2, iNaT + 1])
    gb = ser.groupby([1, 2, 3, 3])
    result = gb.min(min_count=2)

    # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy
    expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1})
    expected.index = expected.index.astype(int)
    tm.assert_series_equal(result, expected, check_exact=True)


@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_column(func):
    # GH 31471
    groups = [1, 2]
    periods = pd.period_range("2020", periods=2, freq="Y")
    df = DataFrame({"a": groups, "b": periods})

    result = getattr(df.groupby("a")["b"], func)()
    idx = pd.Index([1, 2], name="a")
    expected = Series(periods, index=idx, name="b")

    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_frame(func):
    # GH 31471
    groups = [1, 2]
    periods = pd.period_range("2020", periods=2, freq="Y")
    df = DataFrame({"a": groups, "b": periods})

    result = getattr(df.groupby("a"), func)()
    idx = pd.Index([1, 2], name="a")
    expected = DataFrame({"b": periods}, index=idx)

    tm.assert_frame_equal(result, expected)


def test_aggregate_numeric_object_dtype():
    # https://github.com/pandas-dev/pandas/issues/39329
    # simplified case: multiple object columns where one is all-NaN
    # -> gets split as the all-NaN is inferred as float
    df = DataFrame(
        {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
    ).astype(object)
    result = df.groupby("key").min()
    expected = (
        DataFrame(
            {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]},
        )
        .set_index("key")
        .astype(object)
    )
    tm.assert_frame_equal(result, expected)

    # same but with numbers
    df = DataFrame(
        {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
    ).astype(object)
    result = df.groupby("key").min()
    expected = (
        DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]})
        .set_index("key")
        .astype(object)
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
    # GH: 28641 groupby drops index, when grouping over categorical column with min/max
    ds = Series(["b"], dtype="category").cat.as_ordered()
    df = DataFrame({"A": [1997], "B": ds})
    result = df.groupby("A").agg({"B": func})
    expected = DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A"))

    # ordered categorical dtype should be preserved
    expected["B"] = expected["B"].astype(ds.dtype)

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Float64", "Float32", "boolean"])
def test_groupby_min_max_nullable(dtype):
    if dtype == "Int64":
        # GH#41743 avoid precision loss
        ts = 1618556707013635762
    elif dtype == "boolean":
        ts = 0
    else:
        ts = 4.0

    df = DataFrame({"id": [2, 2], "ts": [ts, ts + 1]})
    df["ts"] = df["ts"].astype(dtype)

    gb = df.groupby("id")

    result = gb.min()
    expected = df.iloc[:1].set_index("id")
    tm.assert_frame_equal(result, expected)

    res_max = gb.max()
    expected_max = df.iloc[1:].set_index("id")
    tm.assert_frame_equal(res_max, expected_max)

    # min_count larger than group size -> masked to NA
    result2 = gb.min(min_count=3)
    expected2 = DataFrame({"ts": [pd.NA]}, index=expected.index, dtype=dtype)
    tm.assert_frame_equal(result2, expected2)

    res_max2 = gb.max(min_count=3)
    tm.assert_frame_equal(res_max2, expected2)

    # Case with NA values
    df2 = DataFrame({"id": [2, 2, 2], "ts": [ts, pd.NA, ts + 1]})
    df2["ts"] = df2["ts"].astype(dtype)
    gb2 = df2.groupby("id")

    result3 = gb2.min()
    tm.assert_frame_equal(result3, expected)

    res_max3 = gb2.max()
    tm.assert_frame_equal(res_max3, expected_max)

    result4 = gb2.min(min_count=100)
    tm.assert_frame_equal(result4, expected2)

    res_max4 = gb2.max(min_count=100)
    tm.assert_frame_equal(res_max4, expected2)


def test_min_max_nullable_uint64_empty_group():
    # don't raise NotImplementedError from libgroupby
    cat = pd.Categorical([0] * 10, categories=[0, 1])
    df = DataFrame({"A": cat, "B": pd.array(np.arange(10, dtype=np.uint64))})
    gb = df.groupby("A", observed=False)

    res = gb.min()

    idx = pd.CategoricalIndex([0, 1], dtype=cat.dtype, name="A")
    expected = DataFrame({"B": pd.array([0, pd.NA], dtype="UInt64")}, index=idx)
    tm.assert_frame_equal(res, expected)

    res = gb.max()
    expected.iloc[0, 0] = 9
    tm.assert_frame_equal(res, expected)


@pytest.mark.parametrize("func", ["first", "last", "min", "max"])
def test_groupby_min_max_categorical(func):
    # GH: 52151
    df = DataFrame(
        {
            "col1": pd.Categorical(["A"], categories=list("AB"), ordered=True),
            "col2": pd.Categorical([1], categories=[1, 2], ordered=True),
            "value": 0.1,
        }
    )
    result = getattr(df.groupby("col1", observed=False), func)()

    idx = pd.CategoricalIndex(data=["A", "B"], name="col1", ordered=True)
    expected = DataFrame(
        {
            "col2": pd.Categorical([1, None], categories=[1, 2], ordered=True),
            "value": [0.1, None],
        },
        index=idx,
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("func", ["min", "max"])
def test_min_empty_string_dtype(func, string_dtype_no_object):
    # GH#55619
    dtype = string_dtype_no_object
    df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0]
    result = getattr(df.groupby("a"), func)()
    expected = DataFrame(
        columns=["b", "c"], dtype=dtype, index=pd.Index([], dtype=dtype, name="a")
    )
    tm.assert_frame_equal(result, expected)


def test_max_nan_bug():
    # max over an object column containing NaN must not turn valid values into NaN.
    df = DataFrame(
        {
            "Unnamed: 0": ["-04-23", "-05-06", "-05-07"],
            "Date": [
                "2013-04-23 00:00:00",
                "2013-05-06 00:00:00",
                "2013-05-07 00:00:00",
            ],
            "app": Series([np.nan, np.nan, "OE"]),
            "File": ["log080001.log", "log.log", "xlsx"],
        }
    )
    gb = df.groupby("Date")
    r = gb[["File"]].max()
    e = gb["File"].max().to_frame()
    tm.assert_frame_equal(r, e)
    assert not r["File"].isna().any()


@pytest.mark.slow
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("dropna", [False, True])
@pytest.mark.parametrize("as_index", [True, False])
@pytest.mark.parametrize("with_nan", [True, False])
@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]])
def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys):
    # Cython nunique must match applying Series.nunique per group, with and
    # without NaN in both values and keys; input frame must be left unmodified.
    n = 100
    m = 10
    days = date_range("2015-08-23", periods=10)
    df = DataFrame(
        {
            "jim": np.random.default_rng(2).choice(list(ascii_lowercase), n),
            "joe": np.random.default_rng(2).choice(days, n),
            "julie": np.random.default_rng(2).integers(0, m, n),
        }
    )
    if with_nan:
        df = df.astype({"julie": float})  # Explicit cast to avoid implicit cast below
        df.loc[1::17, "jim"] = None
        df.loc[3::37, "joe"] = None
        df.loc[7::19, "julie"] = None
        df.loc[8::19, "julie"] = None
        df.loc[9::19, "julie"] = None
    original_df = df.copy()
    gr = df.groupby(keys, as_index=as_index, sort=sort)
    left = gr["julie"].nunique(dropna=dropna)

    gr = df.groupby(keys, as_index=as_index, sort=sort)
    right = gr["julie"].apply(Series.nunique, dropna=dropna)
    if not as_index:
        right = right.reset_index(drop=True)

    if as_index:
        tm.assert_series_equal(left, right, check_names=False)
    else:
        tm.assert_frame_equal(left, right, check_names=False)
    tm.assert_frame_equal(df, original_df)


def test_nunique():
    # NOTE(review): this test is truncated by the chunk boundary; the body
    # continues past the visible source.
    df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")})

    expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]})
    result = df.groupby("A", as_index=False).nunique()
    tm.assert_frame_equal(result, expected)

    # as_index
    expected.index = list("abc")
    expected.index.name = "A"
    expected = expected.drop(columns="A")
    result = df.groupby("A").nunique()
    tm.assert_frame_equal(result, expected)

    # with na
    result = df.replace({"x": None}).groupby("A").nunique(dropna=False)
    tm.assert_frame_equal(result, expected)

    # dropna
    expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc"))
    expected.index.name = "A"
    result = df.replace({"x": 
None}).groupby("A").nunique()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nunique_with_object():\n # GH 11077\n data = DataFrame(\n [\n [100, 1, "Alice"],\n [200, 2, "Bob"],\n [300, 3, "Charlie"],\n [-400, 4, "Dan"],\n [500, 5, "Edith"],\n ],\n columns=["amount", "id", "name"],\n )\n\n result = data.groupby(["id", "amount"])["name"].nunique()\n index = MultiIndex.from_arrays([data.id, data.amount])\n expected = Series([1] * 5, name="name", index=index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_nunique_with_empty_series():\n # GH 12553\n data = Series(name="name", dtype=object)\n result = data.groupby(level=0).nunique()\n expected = Series(name="name", dtype="int64")\n tm.assert_series_equal(result, expected)\n\n\ndef test_nunique_with_timegrouper():\n # GH 13453\n test = DataFrame(\n {\n "time": [\n Timestamp("2016-06-28 09:35:35"),\n Timestamp("2016-06-28 16:09:30"),\n Timestamp("2016-06-28 16:46:28"),\n ],\n "data": ["1", "2", "3"],\n }\n ).set_index("time")\n result = test.groupby(pd.Grouper(freq="h"))["data"].nunique()\n expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "key, data, dropna, expected",\n [\n (\n ["x", "x", "x"],\n [Timestamp("2019-01-01"), pd.NaT, Timestamp("2019-01-01")],\n True,\n Series([1], index=pd.Index(["x"], name="key"), name="data"),\n ),\n (\n ["x", "x", "x"],\n [dt.date(2019, 1, 1), pd.NaT, dt.date(2019, 1, 1)],\n True,\n Series([1], index=pd.Index(["x"], name="key"), name="data"),\n ),\n (\n ["x", "x", "x", "y", "y"],\n [\n dt.date(2019, 1, 1),\n pd.NaT,\n dt.date(2019, 1, 1),\n pd.NaT,\n dt.date(2019, 1, 1),\n ],\n False,\n Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"),\n ),\n (\n ["x", "x", "x", "x", "y"],\n [\n dt.date(2019, 1, 1),\n pd.NaT,\n dt.date(2019, 1, 1),\n pd.NaT,\n dt.date(2019, 1, 1),\n ],\n False,\n Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"),\n 
),\n ],\n)\ndef test_nunique_with_NaT(key, data, dropna, expected):\n # GH 27951\n df = DataFrame({"key": key, "data": data})\n result = df.groupby(["key"])["data"].nunique(dropna=dropna)\n tm.assert_series_equal(result, expected)\n\n\ndef test_nunique_preserves_column_level_names():\n # GH 23222\n test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0"))\n result = test.groupby([0, 0, 0]).nunique()\n expected = DataFrame([2], index=np.array([0]), columns=test.columns)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nunique_transform_with_datetime():\n # GH 35109 - transform with nunique on datetimes results in integers\n df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"])\n result = df.groupby([0, 0, 1])["date"].transform("nunique")\n expected = Series([2, 2, 1], name="date")\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_categorical(observed):\n # GH#21334\n cat = Series([1]).astype("category")\n ser = cat[:0]\n gb = ser.groupby(ser, observed=observed)\n result = gb.nunique()\n if observed:\n expected = Series([], index=cat[:0], dtype="int64")\n else:\n expected = Series([0], index=cat, dtype="int64")\n tm.assert_series_equal(result, expected)\n\n\ndef test_intercept_builtin_sum():\n s = Series([1.0, 2.0, np.nan, 3.0])\n grouped = s.groupby([0, 1, 2, 2])\n\n msg = "using SeriesGroupBy.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result = grouped.agg(builtins.sum)\n msg = "using np.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n result2 = grouped.apply(builtins.sum)\n expected = grouped.sum()\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n\n@pytest.mark.parametrize("min_count", [0, 10])\ndef test_groupby_sum_mincount_boolean(min_count):\n b = True\n a = False\n na = np.nan\n dfg = pd.array([b, b, na, na, a, a, b], dtype="boolean")\n\n df = DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": dfg})\n result = 
df.groupby("A").sum(min_count=min_count)\n if min_count == 0:\n expected = DataFrame(\n {"B": pd.array([3, 0, 0], dtype="Int64")},\n index=pd.Index([1, 2, 3], name="A"),\n )\n tm.assert_frame_equal(result, expected)\n else:\n expected = DataFrame(\n {"B": pd.array([pd.NA] * 3, dtype="Int64")},\n index=pd.Index([1, 2, 3], name="A"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_sum_below_mincount_nullable_integer():\n # https://github.com/pandas-dev/pandas/issues/32861\n df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")\n grouped = df.groupby("a")\n idx = pd.Index([0, 1, 2], name="a", dtype="Int64")\n\n result = grouped["b"].sum(min_count=2)\n expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")\n tm.assert_series_equal(result, expected)\n\n result = grouped.sum(min_count=2)\n expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_sum_timedelta_with_nat():\n # GH#42659\n df = DataFrame(\n {\n "a": [1, 1, 2, 2],\n "b": [pd.Timedelta("1d"), pd.Timedelta("2d"), pd.Timedelta("3d"), pd.NaT],\n }\n )\n td3 = pd.Timedelta(days=3)\n\n gb = df.groupby("a")\n\n res = gb.sum()\n expected = DataFrame({"b": [td3, td3]}, index=pd.Index([1, 2], name="a"))\n tm.assert_frame_equal(res, expected)\n\n res = gb["b"].sum()\n tm.assert_series_equal(res, expected["b"])\n\n res = gb["b"].sum(min_count=2)\n expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index)\n tm.assert_series_equal(res, expected)\n\n\n@pytest.mark.parametrize(\n "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]\n)\n@pytest.mark.parametrize(\n "method,data",\n [\n ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),\n ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),\n ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),\n ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),\n ("count", {"df": 
[{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),\n ],\n)\ndef test_groupby_non_arithmetic_agg_types(dtype, method, data):\n # GH9311, GH6620\n df = DataFrame(\n [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]\n )\n\n df["b"] = df.b.astype(dtype)\n\n if "args" not in data:\n data["args"] = []\n\n if "out_type" in data:\n out_type = data["out_type"]\n else:\n out_type = dtype\n\n exp = data["df"]\n df_out = DataFrame(exp)\n\n df_out["b"] = df_out.b.astype(out_type)\n df_out.set_index("a", inplace=True)\n\n grpd = df.groupby("a")\n t = getattr(grpd, method)(*data["args"])\n tm.assert_frame_equal(t, df_out)\n\n\ndef scipy_sem(*args, **kwargs):\n from scipy.stats import sem\n\n return sem(*args, ddof=1, **kwargs)\n\n\n@pytest.mark.parametrize(\n "op,targop",\n [\n ("mean", np.mean),\n ("median", np.median),\n ("std", np.std),\n ("var", np.var),\n ("sum", np.sum),\n ("prod", np.prod),\n ("min", np.min),\n ("max", np.max),\n ("first", lambda x: x.iloc[0]),\n ("last", lambda x: x.iloc[-1]),\n ("count", np.size),\n pytest.param("sem", scipy_sem, marks=td.skip_if_no("scipy")),\n ],\n)\ndef test_ops_general(op, targop):\n df = DataFrame(np.random.default_rng(2).standard_normal(1000))\n labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)\n\n result = getattr(df.groupby(labels), op)()\n warn = None if op in ("first", "last", "count", "sem") else FutureWarning\n msg = f"using DataFrameGroupBy.{op}"\n with tm.assert_produces_warning(warn, match=msg):\n expected = df.groupby(labels).agg(targop)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "values",\n [\n {\n "a": [1, 1, 1, 2, 2, 2, 3, 3, 3],\n "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],\n },\n {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},\n ],\n)\n@pytest.mark.parametrize("function", ["mean", "median", "var"])\ndef test_apply_to_nullable_integer_returns_float(values, function):\n # https://github.com/pandas-dev/pandas/issues/32219\n 
output = 0.5 if function == "var" else 1.5\n arr = np.array([output] * 3, dtype=float)\n idx = pd.Index([1, 2, 3], name="a", dtype="Int64")\n expected = DataFrame({"b": arr}, index=idx).astype("Float64")\n\n groups = DataFrame(values, dtype="Int64").groupby("a")\n\n result = getattr(groups, function)()\n tm.assert_frame_equal(result, expected)\n\n result = groups.agg(function)\n tm.assert_frame_equal(result, expected)\n\n result = groups.agg([function])\n expected.columns = MultiIndex.from_tuples([("b", function)])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op",\n [\n "sum",\n "prod",\n "min",\n "max",\n "median",\n "mean",\n "skew",\n "std",\n "var",\n "sem",\n ],\n)\n@pytest.mark.parametrize("axis", [0, 1])\n@pytest.mark.parametrize("skipna", [True, False])\n@pytest.mark.parametrize("sort", [True, False])\ndef test_regression_allowlist_methods(op, axis, skipna, sort):\n # GH6944\n # GH 17537\n # explicitly test the allowlist methods\n raw_frame = DataFrame([0])\n if axis == 0:\n frame = raw_frame\n msg = "The 'axis' keyword in DataFrame.groupby is deprecated and will be"\n else:\n frame = raw_frame.T\n msg = "DataFrame.groupby with axis=1 is deprecated"\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouped = frame.groupby(level=0, axis=axis, sort=sort)\n\n if op == "skew":\n # skew has skipna\n result = getattr(grouped, op)(skipna=skipna)\n expected = frame.groupby(level=0).apply(\n lambda h: getattr(h, op)(axis=axis, skipna=skipna)\n )\n if sort:\n expected = expected.sort_index(axis=axis)\n tm.assert_frame_equal(result, expected)\n else:\n result = getattr(grouped, op)()\n expected = frame.groupby(level=0).apply(lambda h: getattr(h, op)(axis=axis))\n if sort:\n expected = expected.sort_index(axis=axis)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_prod_with_int64_dtype():\n # GH#46573\n data = [\n [1, 11],\n [1, 41],\n [1, 17],\n [1, 37],\n [1, 7],\n [1, 29],\n [1, 31],\n [1, 2],\n [1, 3],\n 
[1, 43],\n [1, 5],\n [1, 47],\n [1, 19],\n [1, 88],\n ]\n df = DataFrame(data, columns=["A", "B"], dtype="int64")\n result = df.groupby(["A"]).prod().reset_index()\n expected = DataFrame({"A": [1], "B": [180970905912331920]}, dtype="int64")\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_reductions.py | test_reductions.py | Python | 36,870 | 0.95 | 0.078298 | 0.066667 | python-kit | 859 | 2024-06-16T04:42:30.891089 | GPL-3.0 | true | 46eb69965a79f8d075be735af8cf7124 |
"""\ntest with the TimeGrouper / grouping with datetimes\n"""\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._config import using_string_dtype\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n offsets,\n)\nimport pandas._testing as tm\nfrom pandas.core.groupby.grouper import Grouper\nfrom pandas.core.groupby.ops import BinGrouper\n\n\n@pytest.fixture\ndef frame_for_truncated_bingrouper():\n """\n DataFrame used by groupby_with_truncated_bingrouper, made into\n a separate fixture for easier reuse in\n test_groupby_apply_timegrouper_with_nat_apply_squeeze\n """\n df = DataFrame(\n {\n "Quantity": [18, 3, 5, 1, 9, 3],\n "Date": [\n Timestamp(2013, 9, 1, 13, 0),\n Timestamp(2013, 9, 1, 13, 5),\n Timestamp(2013, 10, 1, 20, 0),\n Timestamp(2013, 10, 3, 10, 0),\n pd.NaT,\n Timestamp(2013, 9, 2, 14, 0),\n ],\n }\n )\n return df\n\n\n@pytest.fixture\ndef groupby_with_truncated_bingrouper(frame_for_truncated_bingrouper):\n """\n GroupBy object such that gb._grouper is a BinGrouper and\n len(gb._grouper.result_index) < len(gb._grouper.group_keys_seq)\n\n Aggregations on this groupby should have\n\n dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date")\n\n As either the index or an index level.\n """\n df = frame_for_truncated_bingrouper\n\n tdg = Grouper(key="Date", freq="5D")\n gb = df.groupby(tdg)\n\n # check we're testing the case we're interested in\n assert len(gb._grouper.result_index) != len(gb._grouper.group_keys_seq)\n\n return gb\n\n\nclass TestGroupBy:\n # TODO(infer_string) resample sum introduces 0's\n # https://github.com/pandas-dev/pandas/issues/60229\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)")\n def test_groupby_with_timegrouper(self):\n # GH 4161\n # TimeGrouper requires a sorted index\n # also verifies that the resultant index has the correct name\n df_original 
= DataFrame(\n {\n "Buyer": "Carl Carl Carl Carl Joe Carl".split(),\n "Quantity": [18, 3, 5, 1, 9, 3],\n "Date": [\n datetime(2013, 9, 1, 13, 0),\n datetime(2013, 9, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 3, 10, 0),\n datetime(2013, 12, 2, 12, 0),\n datetime(2013, 9, 2, 14, 0),\n ],\n }\n )\n\n # GH 6908 change target column's order\n df_reordered = df_original.sort_values(by="Quantity")\n\n for df in [df_original, df_reordered]:\n df = df.set_index(["Date"])\n\n exp_dti = date_range(\n "20130901",\n "20131205",\n freq="5D",\n name="Date",\n inclusive="left",\n unit=df.index.unit,\n )\n expected = DataFrame(\n {"Buyer": 0, "Quantity": 0},\n index=exp_dti,\n )\n # Cast to object to avoid implicit cast when setting entry to "CarlCarlCarl"\n expected = expected.astype({"Buyer": object})\n expected.iloc[0, 0] = "CarlCarlCarl"\n expected.iloc[6, 0] = "CarlCarl"\n expected.iloc[18, 0] = "Joe"\n expected.iloc[[0, 6, 18], 1] = np.array([24, 6, 9], dtype="int64")\n\n result1 = df.resample("5D").sum()\n tm.assert_frame_equal(result1, expected)\n\n df_sorted = df.sort_index()\n result2 = df_sorted.groupby(Grouper(freq="5D")).sum()\n tm.assert_frame_equal(result2, expected)\n\n result3 = df.groupby(Grouper(freq="5D")).sum()\n tm.assert_frame_equal(result3, expected)\n\n @pytest.mark.parametrize("should_sort", [True, False])\n def test_groupby_with_timegrouper_methods(self, should_sort):\n # GH 3881\n # make sure API of timegrouper conforms\n\n df = DataFrame(\n {\n "Branch": "A A A A A B".split(),\n "Buyer": "Carl Mark Carl Joe Joe Carl".split(),\n "Quantity": [1, 3, 5, 8, 9, 3],\n "Date": [\n datetime(2013, 1, 1, 13, 0),\n datetime(2013, 1, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 12, 2, 12, 0),\n datetime(2013, 12, 2, 14, 0),\n ],\n }\n )\n\n if should_sort:\n df = df.sort_values(by="Quantity", ascending=False)\n\n df = df.set_index("Date", drop=False)\n g = df.groupby(Grouper(freq="6ME"))\n assert 
g.group_keys\n\n assert isinstance(g._grouper, BinGrouper)\n groups = g.groups\n assert isinstance(groups, dict)\n assert len(groups) == 3\n\n def test_timegrouper_with_reg_groups(self):\n # GH 3794\n # allow combination of timegrouper/reg groups\n\n df_original = DataFrame(\n {\n "Branch": "A A A A A A A B".split(),\n "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),\n "Quantity": [1, 3, 5, 1, 8, 1, 9, 3],\n "Date": [\n datetime(2013, 1, 1, 13, 0),\n datetime(2013, 1, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 12, 2, 12, 0),\n datetime(2013, 12, 2, 14, 0),\n ],\n }\n ).set_index("Date")\n\n df_sorted = df_original.sort_values(by="Quantity", ascending=False)\n\n for df in [df_original, df_sorted]:\n expected = DataFrame(\n {\n "Buyer": "Carl Joe Mark".split(),\n "Quantity": [10, 18, 3],\n "Date": [\n datetime(2013, 12, 31, 0, 0),\n datetime(2013, 12, 31, 0, 0),\n datetime(2013, 12, 31, 0, 0),\n ],\n }\n ).set_index(["Date", "Buyer"])\n\n msg = "The default value of numeric_only"\n result = df.groupby([Grouper(freq="YE"), "Buyer"]).sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n {\n "Buyer": "Carl Mark Carl Joe".split(),\n "Quantity": [1, 3, 9, 18],\n "Date": [\n datetime(2013, 1, 1, 0, 0),\n datetime(2013, 1, 1, 0, 0),\n datetime(2013, 7, 1, 0, 0),\n datetime(2013, 7, 1, 0, 0),\n ],\n }\n ).set_index(["Date", "Buyer"])\n result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n df_original = DataFrame(\n {\n "Branch": "A A A A A A A B".split(),\n "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),\n "Quantity": [1, 3, 5, 1, 8, 1, 9, 3],\n "Date": [\n datetime(2013, 10, 1, 13, 0),\n datetime(2013, 10, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 
0),\n datetime(2013, 10, 2, 12, 0),\n datetime(2013, 10, 2, 14, 0),\n ],\n }\n ).set_index("Date")\n\n df_sorted = df_original.sort_values(by="Quantity", ascending=False)\n for df in [df_original, df_sorted]:\n expected = DataFrame(\n {\n "Buyer": "Carl Joe Mark Carl Joe".split(),\n "Quantity": [6, 8, 3, 4, 10],\n "Date": [\n datetime(2013, 10, 1, 0, 0),\n datetime(2013, 10, 1, 0, 0),\n datetime(2013, 10, 1, 0, 0),\n datetime(2013, 10, 2, 0, 0),\n datetime(2013, 10, 2, 0, 0),\n ],\n }\n ).set_index(["Date", "Buyer"])\n\n result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby([Grouper(freq="1ME"), "Buyer"]).sum(numeric_only=True)\n expected = DataFrame(\n {\n "Buyer": "Carl Joe Mark".split(),\n "Quantity": [10, 18, 3],\n "Date": [\n datetime(2013, 10, 31, 0, 0),\n datetime(2013, 10, 31, 0, 0),\n datetime(2013, 10, 31, 0, 0),\n ],\n }\n ).set_index(["Date", "Buyer"])\n tm.assert_frame_equal(result, expected)\n\n # passing the name\n df = df.reset_index()\n result = df.groupby([Grouper(freq="1ME", key="Date"), "Buyer"]).sum(\n numeric_only=True\n )\n tm.assert_frame_equal(result, expected)\n\n with pytest.raises(KeyError, match="'The grouper name foo is not found'"):\n df.groupby([Grouper(freq="1ME", key="foo"), "Buyer"]).sum()\n\n # passing the level\n df = df.set_index("Date")\n result = df.groupby([Grouper(freq="1ME", level="Date"), "Buyer"]).sum(\n numeric_only=True\n )\n tm.assert_frame_equal(result, expected)\n result = df.groupby([Grouper(freq="1ME", level=0), "Buyer"]).sum(\n numeric_only=True\n )\n tm.assert_frame_equal(result, expected)\n\n with pytest.raises(ValueError, match="The level foo is not valid"):\n df.groupby([Grouper(freq="1ME", level="foo"), "Buyer"]).sum()\n\n # multi names\n df = df.copy()\n df["Date"] = df.index + offsets.MonthEnd(2)\n result = df.groupby([Grouper(freq="1ME", key="Date"), "Buyer"]).sum(\n numeric_only=True\n )\n expected = DataFrame(\n {\n 
"Buyer": "Carl Joe Mark".split(),\n "Quantity": [10, 18, 3],\n "Date": [\n datetime(2013, 11, 30, 0, 0),\n datetime(2013, 11, 30, 0, 0),\n datetime(2013, 11, 30, 0, 0),\n ],\n }\n ).set_index(["Date", "Buyer"])\n tm.assert_frame_equal(result, expected)\n\n # error as we have both a level and a name!\n msg = "The Grouper cannot specify both a key and a level!"\n with pytest.raises(ValueError, match=msg):\n df.groupby(\n [Grouper(freq="1ME", key="Date", level="Date"), "Buyer"]\n ).sum()\n\n # single groupers\n expected = DataFrame(\n [[31]],\n columns=["Quantity"],\n index=DatetimeIndex(\n [datetime(2013, 10, 31, 0, 0)], freq=offsets.MonthEnd(), name="Date"\n ),\n )\n result = df.groupby(Grouper(freq="1ME")).sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby([Grouper(freq="1ME")]).sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n expected.index = expected.index.shift(1)\n assert expected.index.freq == offsets.MonthEnd()\n result = df.groupby(Grouper(freq="1ME", key="Date")).sum(numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby([Grouper(freq="1ME", key="Date")]).sum(\n numeric_only=True\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("freq", ["D", "ME", "YE", "QE-APR"])\n def test_timegrouper_with_reg_groups_freq(self, freq):\n # GH 6764 multiple grouping with/without sort\n df = DataFrame(\n {\n "date": pd.to_datetime(\n [\n "20121002",\n "20121007",\n "20130130",\n "20130202",\n "20130305",\n "20121002",\n "20121207",\n "20130130",\n "20130202",\n "20130305",\n "20130202",\n "20130305",\n ]\n ),\n "user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],\n "whole_cost": [\n 1790,\n 364,\n 280,\n 259,\n 201,\n 623,\n 90,\n 312,\n 359,\n 301,\n 359,\n 801,\n ],\n "cost1": [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12],\n }\n ).set_index("date")\n\n expected = (\n df.groupby("user_id")["whole_cost"]\n .resample(freq)\n .sum(min_count=1) # XXX\n .dropna()\n 
.reorder_levels(["date", "user_id"])\n .sort_index()\n .astype("int64")\n )\n expected.name = "whole_cost"\n\n result1 = (\n df.sort_index().groupby([Grouper(freq=freq), "user_id"])["whole_cost"].sum()\n )\n tm.assert_series_equal(result1, expected)\n\n result2 = df.groupby([Grouper(freq=freq), "user_id"])["whole_cost"].sum()\n tm.assert_series_equal(result2, expected)\n\n def test_timegrouper_get_group(self):\n # GH 6914\n\n df_original = DataFrame(\n {\n "Buyer": "Carl Joe Joe Carl Joe Carl".split(),\n "Quantity": [18, 3, 5, 1, 9, 3],\n "Date": [\n datetime(2013, 9, 1, 13, 0),\n datetime(2013, 9, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 3, 10, 0),\n datetime(2013, 12, 2, 12, 0),\n datetime(2013, 9, 2, 14, 0),\n ],\n }\n )\n df_reordered = df_original.sort_values(by="Quantity")\n\n # single grouping\n expected_list = [\n df_original.iloc[[0, 1, 5]],\n df_original.iloc[[2, 3]],\n df_original.iloc[[4]],\n ]\n dt_list = ["2013-09-30", "2013-10-31", "2013-12-31"]\n\n for df in [df_original, df_reordered]:\n grouped = df.groupby(Grouper(freq="ME", key="Date"))\n for t, expected in zip(dt_list, expected_list):\n dt = Timestamp(t)\n result = grouped.get_group(dt)\n tm.assert_frame_equal(result, expected)\n\n # multiple grouping\n expected_list = [\n df_original.iloc[[1]],\n df_original.iloc[[3]],\n df_original.iloc[[4]],\n ]\n g_list = [("Joe", "2013-09-30"), ("Carl", "2013-10-31"), ("Joe", "2013-12-31")]\n\n for df in [df_original, df_reordered]:\n grouped = df.groupby(["Buyer", Grouper(freq="ME", key="Date")])\n for (b, t), expected in zip(g_list, expected_list):\n dt = Timestamp(t)\n result = grouped.get_group((b, dt))\n tm.assert_frame_equal(result, expected)\n\n # with index\n df_original = df_original.set_index("Date")\n df_reordered = df_original.sort_values(by="Quantity")\n\n expected_list = [\n df_original.iloc[[0, 1, 5]],\n df_original.iloc[[2, 3]],\n df_original.iloc[[4]],\n ]\n\n for df in [df_original, df_reordered]:\n grouped = 
df.groupby(Grouper(freq="ME"))\n for t, expected in zip(dt_list, expected_list):\n dt = Timestamp(t)\n result = grouped.get_group(dt)\n tm.assert_frame_equal(result, expected)\n\n def test_timegrouper_apply_return_type_series(self):\n # Using `apply` with the `TimeGrouper` should give the\n # same return type as an `apply` with a `Grouper`.\n # Issue #11742\n df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]})\n df_dt = df.copy()\n df_dt["date"] = pd.to_datetime(df_dt["date"])\n\n def sumfunc_series(x):\n return Series([x["value"].sum()], ("sum",))\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = df.groupby(Grouper(key="date")).apply(sumfunc_series)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df_dt.groupby(Grouper(freq="ME", key="date")).apply(sumfunc_series)\n tm.assert_frame_equal(\n result.reset_index(drop=True), expected.reset_index(drop=True)\n )\n\n def test_timegrouper_apply_return_type_value(self):\n # Using `apply` with the `TimeGrouper` should give the\n # same return type as an `apply` with a `Grouper`.\n # Issue #11742\n df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]})\n df_dt = df.copy()\n df_dt["date"] = pd.to_datetime(df_dt["date"])\n\n def sumfunc_value(x):\n return x.value.sum()\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = df.groupby(Grouper(key="date")).apply(sumfunc_value)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df_dt.groupby(Grouper(freq="ME", key="date")).apply(sumfunc_value)\n tm.assert_series_equal(\n result.reset_index(drop=True), expected.reset_index(drop=True)\n )\n\n def test_groupby_groups_datetimeindex(self):\n # GH#1430\n periods = 1000\n ind = date_range(start="2012/1/1", 
freq="5min", periods=periods)\n df = DataFrame(\n {"high": np.arange(periods), "low": np.arange(periods)}, index=ind\n )\n grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))\n\n # it works!\n groups = grouped.groups\n assert isinstance(next(iter(groups.keys())), datetime)\n\n def test_groupby_groups_datetimeindex2(self):\n # GH#11442\n index = date_range("2015/01/01", periods=5, name="date")\n df = DataFrame({"A": [5, 6, 7, 8, 9], "B": [1, 2, 3, 4, 5]}, index=index)\n result = df.groupby(level="date").groups\n dates = ["2015-01-05", "2015-01-04", "2015-01-03", "2015-01-02", "2015-01-01"]\n expected = {\n Timestamp(date): DatetimeIndex([date], name="date") for date in dates\n }\n tm.assert_dict_equal(result, expected)\n\n grouped = df.groupby(level="date")\n for date in dates:\n result = grouped.get_group(date)\n data = [[df.loc[date, "A"], df.loc[date, "B"]]]\n expected_index = DatetimeIndex(\n [date], name="date", freq="D", dtype=index.dtype\n )\n expected = DataFrame(data, columns=list("AB"), index=expected_index)\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_groups_datetimeindex_tz(self):\n # GH 3950\n dates = [\n "2011-07-19 07:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 09:00:00",\n "2011-07-19 07:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 09:00:00",\n ]\n df = DataFrame(\n {\n "label": ["a", "a", "a", "b", "b", "b"],\n "datetime": dates,\n "value1": np.arange(6, dtype="int64"),\n "value2": [1, 2] * 3,\n }\n )\n df["datetime"] = df["datetime"].apply(lambda d: Timestamp(d, tz="US/Pacific"))\n\n exp_idx1 = DatetimeIndex(\n [\n "2011-07-19 07:00:00",\n "2011-07-19 07:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 09:00:00",\n "2011-07-19 09:00:00",\n ],\n tz="US/Pacific",\n name="datetime",\n )\n exp_idx2 = Index(["a", "b"] * 3, name="label")\n exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])\n expected = DataFrame(\n {"value1": [0, 3, 1, 4, 2, 5], "value2": [1, 2, 2, 1, 1, 2]},\n 
index=exp_idx,\n columns=["value1", "value2"],\n )\n\n result = df.groupby(["datetime", "label"]).sum()\n tm.assert_frame_equal(result, expected)\n\n # by level\n didx = DatetimeIndex(dates, tz="Asia/Tokyo")\n df = DataFrame(\n {"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]},\n index=didx,\n )\n\n exp_idx = DatetimeIndex(\n ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],\n tz="Asia/Tokyo",\n )\n expected = DataFrame(\n {"value1": [3, 5, 7], "value2": [2, 4, 6]},\n index=exp_idx,\n columns=["value1", "value2"],\n )\n\n result = df.groupby(level=0).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_frame_datetime64_handling_groupby(self):\n # it works!\n df = DataFrame(\n [(3, np.datetime64("2012-07-03")), (3, np.datetime64("2012-07-04"))],\n columns=["a", "date"],\n )\n result = df.groupby("a").first()\n assert result["date"][3] == Timestamp("2012-07-03")\n\n def test_groupby_multi_timezone(self):\n # combining multiple / different timezones yields UTC\n df = DataFrame(\n {\n "value": range(5),\n "date": [\n "2000-01-28 16:47:00",\n "2000-01-29 16:48:00",\n "2000-01-30 16:49:00",\n "2000-01-31 16:50:00",\n "2000-01-01 16:50:00",\n ],\n "tz": [\n "America/Chicago",\n "America/Chicago",\n "America/Los_Angeles",\n "America/Chicago",\n "America/New_York",\n ],\n }\n )\n\n result = df.groupby("tz", group_keys=False).date.apply(\n lambda x: pd.to_datetime(x).dt.tz_localize(x.name)\n )\n\n expected = Series(\n [\n Timestamp("2000-01-28 16:47:00-0600", tz="America/Chicago"),\n Timestamp("2000-01-29 16:48:00-0600", tz="America/Chicago"),\n Timestamp("2000-01-30 16:49:00-0800", tz="America/Los_Angeles"),\n Timestamp("2000-01-31 16:50:00-0600", tz="America/Chicago"),\n Timestamp("2000-01-01 16:50:00-0500", tz="America/New_York"),\n ],\n name="date",\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n tz = "America/Chicago"\n res_values = df.groupby("tz").date.get_group(tz)\n result = 
pd.to_datetime(res_values).dt.tz_localize(tz)\n exp_values = Series(\n ["2000-01-28 16:47:00", "2000-01-29 16:48:00", "2000-01-31 16:50:00"],\n index=[0, 1, 3],\n name="date",\n )\n expected = pd.to_datetime(exp_values).dt.tz_localize(tz)\n tm.assert_series_equal(result, expected)\n\n def test_groupby_groups_periods(self):\n dates = [\n "2011-07-19 07:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 09:00:00",\n "2011-07-19 07:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 09:00:00",\n ]\n df = DataFrame(\n {\n "label": ["a", "a", "a", "b", "b", "b"],\n "period": [pd.Period(d, freq="h") for d in dates],\n "value1": np.arange(6, dtype="int64"),\n "value2": [1, 2] * 3,\n }\n )\n\n exp_idx1 = pd.PeriodIndex(\n [\n "2011-07-19 07:00:00",\n "2011-07-19 07:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 08:00:00",\n "2011-07-19 09:00:00",\n "2011-07-19 09:00:00",\n ],\n freq="h",\n name="period",\n )\n exp_idx2 = Index(["a", "b"] * 3, name="label")\n exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])\n expected = DataFrame(\n {"value1": [0, 3, 1, 4, 2, 5], "value2": [1, 2, 2, 1, 1, 2]},\n index=exp_idx,\n columns=["value1", "value2"],\n )\n\n result = df.groupby(["period", "label"]).sum()\n tm.assert_frame_equal(result, expected)\n\n # by level\n didx = pd.PeriodIndex(dates, freq="h")\n df = DataFrame(\n {"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]},\n index=didx,\n )\n\n exp_idx = pd.PeriodIndex(\n ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],\n freq="h",\n )\n expected = DataFrame(\n {"value1": [3, 5, 7], "value2": [2, 4, 6]},\n index=exp_idx,\n columns=["value1", "value2"],\n )\n\n result = df.groupby(level=0).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_first_datetime64(self):\n df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])\n df[1] = df[1].astype("M8[ns]")\n\n assert issubclass(df[1].dtype.type, np.datetime64)\n\n result = df.groupby(level=0).first()\n got_dt = 
result[1].dtype\n assert issubclass(got_dt.type, np.datetime64)\n\n result = df[1].groupby(level=0).first()\n got_dt = result.dtype\n assert issubclass(got_dt.type, np.datetime64)\n\n def test_groupby_max_datetime64(self):\n # GH 5869\n # datetimelike dtype conversion from int\n df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})\n # TODO: can we retain second reso in .apply here?\n expected = df.groupby("A")["A"].apply(lambda x: x.max()).astype("M8[s]")\n result = df.groupby("A")["A"].max()\n tm.assert_series_equal(result, expected)\n\n def test_groupby_datetime64_32_bit(self):\n # GH 6410 / numpy 4328\n # 32-bit under 1.9-dev indexing issue\n\n df = DataFrame({"A": range(2), "B": [Timestamp("2000-01-1")] * 2})\n result = df.groupby("A")["B"].transform("min")\n expected = Series([Timestamp("2000-01-1")] * 2, name="B")\n tm.assert_series_equal(result, expected)\n\n def test_groupby_with_timezone_selection(self):\n # GH 11616\n # Test that column selection returns output in correct timezone.\n\n df = DataFrame(\n {\n "factor": np.random.default_rng(2).integers(0, 3, size=60),\n "time": date_range("01/01/2000 00:00", periods=60, freq="s", tz="UTC"),\n }\n )\n df1 = df.groupby("factor").max()["time"]\n df2 = df.groupby("factor")["time"].max()\n tm.assert_series_equal(df1, df2)\n\n def test_timezone_info(self):\n # see gh-11682: Timezone info lost when broadcasting\n # scalar datetime to DataFrame\n\n df = DataFrame({"a": [1], "b": [datetime.now(pytz.utc)]})\n assert df["b"][0].tzinfo == pytz.utc\n df = DataFrame({"a": [1, 2, 3]})\n df["b"] = datetime.now(pytz.utc)\n assert df["b"][0].tzinfo == pytz.utc\n\n def test_datetime_count(self):\n df = DataFrame(\n {"a": [1, 2, 3] * 2, "dates": date_range("now", periods=6, freq="min")}\n )\n result = df.groupby("a").dates.count()\n expected = Series([2, 2, 2], index=Index([1, 2, 3], name="a"), name="dates")\n tm.assert_series_equal(result, expected)\n\n def test_first_last_max_min_on_time_data(self):\n # GH 10295\n 
# Verify that NaT is not in the result of max, min, first and last on\n # Dataframe with datetime or timedelta values.\n df_test = DataFrame(\n {\n "dt": [\n np.nan,\n "2015-07-24 10:10",\n "2015-07-25 11:11",\n "2015-07-23 12:12",\n np.nan,\n ],\n "td": [\n np.nan,\n timedelta(days=1),\n timedelta(days=2),\n timedelta(days=3),\n np.nan,\n ],\n }\n )\n df_test.dt = pd.to_datetime(df_test.dt)\n df_test["group"] = "A"\n df_ref = df_test[df_test.dt.notna()]\n\n grouped_test = df_test.groupby("group")\n grouped_ref = df_ref.groupby("group")\n\n tm.assert_frame_equal(grouped_ref.max(), grouped_test.max())\n tm.assert_frame_equal(grouped_ref.min(), grouped_test.min())\n tm.assert_frame_equal(grouped_ref.first(), grouped_test.first())\n tm.assert_frame_equal(grouped_ref.last(), grouped_test.last())\n\n def test_nunique_with_timegrouper_and_nat(self):\n # GH 17575\n test = DataFrame(\n {\n "time": [\n Timestamp("2016-06-28 09:35:35"),\n pd.NaT,\n Timestamp("2016-06-28 16:46:28"),\n ],\n "data": ["1", "2", "3"],\n }\n )\n\n grouper = Grouper(key="time", freq="h")\n result = test.groupby(grouper)["data"].nunique()\n expected = test[test.time.notnull()].groupby(grouper)["data"].nunique()\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n def test_scalar_call_versus_list_call(self):\n # Issue: 17530\n data_frame = {\n "location": ["shanghai", "beijing", "shanghai"],\n "time": Series(\n ["2017-08-09 13:32:23", "2017-08-11 23:23:15", "2017-08-11 22:23:15"],\n dtype="datetime64[ns]",\n ),\n "value": [1, 2, 3],\n }\n data_frame = DataFrame(data_frame).set_index("time")\n grouper = Grouper(freq="D")\n\n grouped = data_frame.groupby(grouper)\n result = grouped.count()\n grouped = data_frame.groupby([grouper])\n expected = grouped.count()\n\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_period_index(self):\n # GH 32108\n periods = 2\n index = pd.period_range(\n start="2018-01", periods=periods, freq="M", name="Month"\n 
)\n period_series = Series(range(periods), index=index)\n result = period_series.groupby(period_series.index.month).sum()\n\n expected = Series(\n range(periods), index=Index(range(1, periods + 1), name=index.name)\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_apply_timegrouper_with_nat_dict_returns(\n self, groupby_with_truncated_bingrouper\n ):\n # GH#43500 case where gb._grouper.result_index and gb._grouper.group_keys_seq\n # have different lengths that goes through the `isinstance(values[0], dict)`\n # path\n gb = groupby_with_truncated_bingrouper\n\n res = gb["Quantity"].apply(lambda x: {"foo": len(x)})\n\n df = gb.obj\n unit = df["Date"]._values.unit\n dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date", unit=unit)\n mi = MultiIndex.from_arrays([dti, ["foo"] * len(dti)])\n expected = Series([3, 0, 0, 0, 0, 0, 2], index=mi, name="Quantity")\n tm.assert_series_equal(res, expected)\n\n def test_groupby_apply_timegrouper_with_nat_scalar_returns(\n self, groupby_with_truncated_bingrouper\n ):\n # GH#43500 Previously raised ValueError bc used index with incorrect\n # length in wrap_applied_result\n gb = groupby_with_truncated_bingrouper\n\n res = gb["Quantity"].apply(lambda x: x.iloc[0] if len(x) else np.nan)\n\n df = gb.obj\n unit = df["Date"]._values.unit\n dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date", unit=unit)\n expected = Series(\n [18, np.nan, np.nan, np.nan, np.nan, np.nan, 5],\n index=dti._with_freq(None),\n name="Quantity",\n )\n\n tm.assert_series_equal(res, expected)\n\n def test_groupby_apply_timegrouper_with_nat_apply_squeeze(\n self, frame_for_truncated_bingrouper\n ):\n df = frame_for_truncated_bingrouper\n\n # We need to create a GroupBy object with only one non-NaT group,\n # so use a huge freq so that all non-NaT dates will be grouped together\n tdg = Grouper(key="Date", freq="100YE")\n gb = df.groupby(tdg)\n\n # check that we will go through the singular_series path\n # in 
_wrap_applied_output_series\n assert gb.ngroups == 1\n assert gb._selected_obj._get_axis(gb.axis).nlevels == 1\n\n # function that returns a Series\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = gb.apply(lambda x: x["Quantity"] * 2)\n\n dti = Index([Timestamp("2013-12-31")], dtype=df["Date"].dtype, name="Date")\n expected = DataFrame(\n [[36, 6, 6, 10, 2]],\n index=dti,\n columns=Index([0, 1, 5, 2, 3], name="Quantity"),\n )\n tm.assert_frame_equal(res, expected)\n\n @pytest.mark.single_cpu\n def test_groupby_agg_numba_timegrouper_with_nat(\n self, groupby_with_truncated_bingrouper\n ):\n pytest.importorskip("numba")\n\n # See discussion in GH#43487\n gb = groupby_with_truncated_bingrouper\n\n result = gb["Quantity"].aggregate(\n lambda values, index: np.nanmean(values), engine="numba"\n )\n\n expected = gb["Quantity"].aggregate("mean")\n tm.assert_series_equal(result, expected)\n\n result_df = gb[["Quantity"]].aggregate(\n lambda values, index: np.nanmean(values), engine="numba"\n )\n expected_df = gb[["Quantity"]].aggregate("mean")\n tm.assert_frame_equal(result_df, expected_df)\n | .venv\Lib\site-packages\pandas\tests\groupby\test_timegrouper.py | test_timegrouper.py | Python | 34,984 | 0.95 | 0.049587 | 0.073547 | vue-tools | 888 | 2024-01-25T21:39:54.741920 | BSD-3-Clause | true | 5ffaeee8f341aaaf7183307b87e04896 |
def get_groupby_method_args(name, obj):\n """\n Get required arguments for a groupby method.\n\n When parametrizing a test over groupby methods (e.g. "sum", "mean", "fillna"),\n it is often the case that arguments are required for certain methods.\n\n Parameters\n ----------\n name: str\n Name of the method.\n obj: Series or DataFrame\n pandas object that is being grouped.\n\n Returns\n -------\n A tuple of required arguments for the method.\n """\n if name in ("nth", "fillna", "take"):\n return (0,)\n if name == "quantile":\n return (0.5,)\n if name == "corrwith":\n return (obj,)\n return ()\n | .venv\Lib\site-packages\pandas\tests\groupby\__init__.py | __init__.py | Python | 659 | 0.85 | 0.28 | 0 | python-kit | 358 | 2025-02-04T15:00:19.606313 | Apache-2.0 | true | 5096bb186644810da70fea2a64a66006 |
"""\ntest .agg behavior / note that .apply is tested generally in test_groupby.py\n"""\nimport datetime\nimport functools\nfrom functools import partial\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import SpecificationError\n\nfrom pandas.core.dtypes.common import is_integer_dtype\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n concat,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.core.groupby.grouper import Grouping\n\n\ndef test_groupby_agg_no_extra_calls():\n # GH#31760\n df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})\n gb = df.groupby("key")["value"]\n\n def dummy_func(x):\n assert len(x) != 0\n return x.sum()\n\n gb.agg(dummy_func)\n\n\ndef test_agg_regression1(tsframe):\n grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])\n result = grouped.agg("mean")\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_must_agg(df):\n grouped = df.groupby("A")["C"]\n\n msg = "Must produce aggregated value"\n with pytest.raises(Exception, match=msg):\n grouped.agg(lambda x: x.describe())\n with pytest.raises(Exception, match=msg):\n grouped.agg(lambda x: x.index[:2])\n\n\ndef test_agg_ser_multi_key(df):\n f = lambda x: x.sum()\n results = df.C.groupby([df.A, df.B]).aggregate(f)\n expected = df.groupby(["A", "B"]).sum()["C"]\n tm.assert_series_equal(results, expected)\n\n\ndef test_groupby_aggregation_mixed_dtype():\n # GH 6212\n expected = DataFrame(\n {\n "v1": [5, 5, 7, np.nan, 3, 3, 4, 1],\n "v2": [55, 55, 77, np.nan, 33, 33, 44, 11],\n },\n index=MultiIndex.from_tuples(\n [\n (1, 95),\n (1, 99),\n (2, 95),\n (2, 99),\n ("big", "damp"),\n ("blue", "dry"),\n ("red", "red"),\n ("red", "wet"),\n ],\n names=["by1", "by2"],\n ),\n )\n\n df = DataFrame(\n {\n "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],\n "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],\n "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, 
"red", 1, np.nan, 12],\n "by2": [\n "wet",\n "dry",\n 99,\n 95,\n np.nan,\n "damp",\n 95,\n 99,\n "red",\n 99,\n np.nan,\n np.nan,\n ],\n }\n )\n\n g = df.groupby(["by1", "by2"])\n result = g[["v1", "v2"]].mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregation_multi_level_column():\n # GH 29772\n lst = [\n [True, True, True, False],\n [True, False, np.nan, False],\n [True, True, np.nan, False],\n [True, True, np.nan, False],\n ]\n df = DataFrame(\n data=lst,\n columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),\n )\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(level=1, axis=1)\n result = gb.sum(numeric_only=False)\n expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]})\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_apply_corner(ts, tsframe):\n # nothing to group, all NA\n grouped = ts.groupby(ts * np.nan, group_keys=False)\n assert ts.dtype == np.float64\n\n # groupby float64 values results in a float64 Index\n exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))\n tm.assert_series_equal(grouped.sum(), exp)\n tm.assert_series_equal(grouped.agg("sum"), exp)\n tm.assert_series_equal(grouped.apply("sum"), exp, check_index_type=False)\n\n # DataFrame\n grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False)\n exp_df = DataFrame(\n columns=tsframe.columns,\n dtype=float,\n index=Index([], name="A", dtype=np.float64),\n )\n tm.assert_frame_equal(grouped.sum(), exp_df)\n tm.assert_frame_equal(grouped.agg("sum"), exp_df)\n\n msg = "The behavior of DataFrame.sum with axis=None is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):\n res = grouped.apply(np.sum)\n tm.assert_frame_equal(res, exp_df)\n\n\ndef test_agg_grouping_is_list_tuple(ts):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((30, 4)),\n 
columns=Index(list("ABCD"), dtype=object),\n index=pd.date_range("2000-01-01", periods=30, freq="B"),\n )\n\n grouped = df.groupby(lambda x: x.year)\n grouper = grouped._grouper.groupings[0].grouping_vector\n grouped._grouper.groupings[0] = Grouping(ts.index, list(grouper))\n\n result = grouped.agg("mean")\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n grouped._grouper.groupings[0] = Grouping(ts.index, tuple(grouper))\n\n result = grouped.agg("mean")\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_python_multiindex(multiindex_dataframe_random_data):\n grouped = multiindex_dataframe_random_data.groupby(["A", "B"])\n\n result = grouped.agg("mean")\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]\n)\ndef test_aggregate_str_func(tsframe, groupbyfunc):\n grouped = tsframe.groupby(groupbyfunc)\n\n # single series\n result = grouped["A"].agg("std")\n expected = grouped["A"].std()\n tm.assert_series_equal(result, expected)\n\n # group frame by function name\n result = grouped.aggregate("var")\n expected = grouped.var()\n tm.assert_frame_equal(result, expected)\n\n # group frame by function dict\n result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})\n expected = DataFrame(\n {\n "A": grouped["A"].var(),\n "B": grouped["B"].std(),\n "C": grouped["C"].mean(),\n "D": grouped["D"].sem(),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_std_masked_dtype(any_numeric_ea_dtype):\n # GH#35516\n df = DataFrame(\n {\n "a": [2, 1, 1, 1, 2, 2, 1],\n "b": Series([pd.NA, 1, 2, 1, 1, 1, 2], dtype="Float64"),\n }\n )\n result = df.groupby("a").std()\n expected = DataFrame(\n {"b": [0.57735, 0]}, index=Index([1, 2], name="a"), dtype="Float64"\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_str_with_kwarg_axis_1_raises(df, reduction_func):\n 
gb = df.groupby(level=0)\n warn_msg = f"DataFrameGroupBy.{reduction_func} with axis=1 is deprecated"\n if reduction_func in ("idxmax", "idxmin"):\n error = TypeError\n msg = "'[<>]' not supported between instances of 'float' and 'str'"\n warn = FutureWarning\n else:\n error = ValueError\n msg = f"Operation {reduction_func} does not support axis=1"\n warn = None\n with pytest.raises(error, match=msg):\n with tm.assert_produces_warning(warn, match=warn_msg):\n gb.agg(reduction_func, axis=1)\n\n\n@pytest.mark.parametrize(\n "func, expected, dtype, result_dtype_dict",\n [\n ("sum", [5, 7, 9], "int64", {}),\n ("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}),\n ("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}),\n ("sum", [5, 7, 9], "Int64", {"j": "int64"}),\n ("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),\n ("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}),\n ],\n)\ndef test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict):\n # GH#43209\n df = DataFrame(\n [[1, 2, 3, 4, 5, 6]] * 3,\n columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]),\n ).astype({("a", "j"): dtype, ("b", "j"): dtype})\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(level=1, axis=1)\n result = gb.agg(func)\n expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(\n result_dtype_dict\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "func, expected_data, result_dtype_dict",\n [\n ("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}),\n # std should ideally return Int64 / Float64 #43330\n ("std", [[2**0.5] * 2] * 3, "float64"),\n ("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}),\n ],\n)\ndef test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):\n # GH#43209\n df = DataFrame(\n np.arange(12).reshape(3, 4),\n 
index=Index([0, 1, 0], name="y"),\n columns=Index([10, 20, 10, 20], name="x"),\n dtype="int64",\n ).astype({10: "Int64"})\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby("x", axis=1)\n result = gb.agg(func)\n expected = DataFrame(\n data=expected_data,\n index=Index([0, 1, 0], name="y"),\n columns=Index([10, 20], name="x"),\n ).astype(result_dtype_dict)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_item_by_item(df):\n grouped = df.groupby("A")\n\n aggfun_0 = lambda ser: ser.size\n result = grouped.agg(aggfun_0)\n foosum = (df.A == "foo").sum()\n barsum = (df.A == "bar").sum()\n K = len(result.columns)\n\n # GH5782\n exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo")\n tm.assert_series_equal(result.xs("foo"), exp)\n\n exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar")\n tm.assert_almost_equal(result.xs("bar"), exp)\n\n def aggfun_1(ser):\n return ser.size\n\n result = DataFrame().groupby(df.A).agg(aggfun_1)\n assert isinstance(result, DataFrame)\n assert len(result) == 0\n\n\ndef test_wrap_agg_out(three_group):\n grouped = three_group.groupby(["A", "B"])\n\n def func(ser):\n if ser.dtype in (object, "string"):\n raise TypeError("Test error message")\n return ser.sum()\n\n with pytest.raises(TypeError, match="Test error message"):\n grouped.aggregate(func)\n result = grouped[["D", "E", "F"]].aggregate(func)\n exp_grouped = three_group.loc[:, ["A", "B", "D", "E", "F"]]\n expected = exp_grouped.groupby(["A", "B"]).aggregate(func)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_multiple_functions_maintain_order(df):\n # GH #610\n funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]\n msg = "is currently using SeriesGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("A")["C"].agg(funcs)\n exp_cols = Index(["mean", "max", "min"])\n\n 
tm.assert_index_equal(result.columns, exp_cols)\n\n\ndef test_series_index_name(df):\n grouped = df.loc[:, ["C"]].groupby(df["A"])\n result = grouped.agg(lambda x: x.mean())\n assert result.index.name == "A"\n\n\ndef test_agg_multiple_functions_same_name():\n # GH 30880\n df = DataFrame(\n np.random.default_rng(2).standard_normal((1000, 3)),\n index=pd.date_range("1/1/2012", freq="s", periods=1000),\n columns=["A", "B", "C"],\n )\n result = df.resample("3min").agg(\n {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}\n )\n expected_index = pd.date_range("1/1/2012", freq="3min", periods=6)\n expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])\n expected_values = np.array(\n [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]\n ).T\n expected = DataFrame(\n expected_values, columns=expected_columns, index=expected_index\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_multiple_functions_same_name_with_ohlc_present():\n # GH 30880\n # ohlc expands dimensions, so different test to the above is required.\n df = DataFrame(\n np.random.default_rng(2).standard_normal((1000, 3)),\n index=pd.date_range("1/1/2012", freq="s", periods=1000, name="dti"),\n columns=Index(["A", "B", "C"], name="alpha"),\n )\n result = df.resample("3min").agg(\n {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}\n )\n expected_index = pd.date_range("1/1/2012", freq="3min", periods=6, name="dti")\n expected_columns = MultiIndex.from_tuples(\n [\n ("A", "ohlc", "open"),\n ("A", "ohlc", "high"),\n ("A", "ohlc", "low"),\n ("A", "ohlc", "close"),\n ("A", "quantile", "A"),\n ("A", "quantile", "A"),\n ],\n names=["alpha", None, None],\n )\n non_ohlc_expected_values = np.array(\n [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]\n ).T\n expected_values = np.hstack(\n [df.resample("3min").A.ohlc(), non_ohlc_expected_values]\n )\n expected = DataFrame(\n expected_values, 
columns=expected_columns, index=expected_index\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_functions_tuples_and_non_tuples(df):\n # #1359\n # Columns B and C would cause partial failure\n df = df.drop(columns=["B", "C"])\n\n funcs = [("foo", "mean"), "std"]\n ex_funcs = [("foo", "mean"), ("std", "std")]\n\n result = df.groupby("A")["D"].agg(funcs)\n expected = df.groupby("A")["D"].agg(ex_funcs)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby("A").agg(funcs)\n expected = df.groupby("A").agg(ex_funcs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_more_flexible_frame_multi_function(df):\n grouped = df.groupby("A")\n\n exmean = grouped.agg({"C": "mean", "D": "mean"})\n exstd = grouped.agg({"C": "std", "D": "std"})\n\n expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)\n expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)\n\n d = {"C": ["mean", "std"], "D": ["mean", "std"]}\n result = grouped.aggregate(d)\n\n tm.assert_frame_equal(result, expected)\n\n # be careful\n result = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})\n expected = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})\n tm.assert_frame_equal(result, expected)\n\n def numpymean(x):\n return np.mean(x)\n\n def numpystd(x):\n return np.std(x, ddof=1)\n\n # this uses column selection & renaming\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}}\n grouped.aggregate(d)\n\n # But without renaming, these functions are OK\n d = {"C": ["mean"], "D": [numpymean, numpystd]}\n grouped.aggregate(d)\n\n\ndef test_multi_function_flexible_mix(df):\n # GH #1268\n grouped = df.groupby("A")\n\n # Expected\n d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}\n # this uses column selection & renaming\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n 
grouped.aggregate(d)\n\n # Test 1\n d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}\n # this uses column selection & renaming\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate(d)\n\n # Test 2\n d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}\n # this uses column selection & renaming\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate(d)\n\n\ndef test_groupby_agg_coercing_bools():\n # issue 14873\n dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})\n gp = dat.groupby("a")\n\n index = Index([1, 2], name="a")\n\n result = gp["b"].aggregate(lambda x: (x != 0).all())\n expected = Series([False, True], index=index, name="b")\n tm.assert_series_equal(result, expected)\n\n result = gp["c"].aggregate(lambda x: x.isnull().all())\n expected = Series([True, False], index=index, name="c")\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_agg_dict_with_getitem():\n # issue 25471\n dat = DataFrame({"A": ["A", "A", "B", "B", "B"], "B": [1, 2, 1, 1, 2]})\n result = dat.groupby("A")[["B"]].agg({"B": "sum"})\n\n expected = DataFrame({"B": [3, 4]}, index=["A", "B"]).rename_axis("A", axis=0)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_agg_dict_dup_columns():\n # GH#55006\n df = DataFrame(\n [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]],\n columns=["a", "b", "c", "c"],\n )\n gb = df.groupby("a")\n result = gb.agg({"b": "sum"})\n expected = DataFrame({"b": [5, 4]}, index=Index([1, 2], name="a"))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op",\n [\n lambda x: x.sum(),\n lambda x: x.cumsum(),\n lambda x: x.transform("sum"),\n lambda x: x.transform("cumsum"),\n lambda x: x.agg("sum"),\n lambda x: x.agg("cumsum"),\n ],\n)\ndef test_bool_agg_dtype(op):\n # GH 7001\n # Bool sum aggregations result in int\n df = DataFrame({"a": [1, 1], "b": [False, True]})\n s = df.set_index("a")["b"]\n\n result = op(df.groupby("a"))["b"].dtype\n assert 
is_integer_dtype(result)\n\n result = op(s.groupby("a")).dtype\n assert is_integer_dtype(result)\n\n\n@pytest.mark.parametrize(\n "keys, agg_index",\n [\n (["a"], Index([1], name="a")),\n (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),\n ],\n)\n@pytest.mark.parametrize(\n "input_dtype", ["bool", "int32", "int64", "float32", "float64"]\n)\n@pytest.mark.parametrize(\n "result_dtype", ["bool", "int32", "int64", "float32", "float64"]\n)\n@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])\ndef test_callable_result_dtype_frame(\n keys, agg_index, input_dtype, result_dtype, method\n):\n # GH 21240\n df = DataFrame({"a": [1], "b": [2], "c": [True]})\n df["c"] = df["c"].astype(input_dtype)\n op = getattr(df.groupby(keys)[["c"]], method)\n result = op(lambda x: x.astype(result_dtype).iloc[0])\n expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index\n expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype(\n result_dtype\n )\n if method == "apply":\n expected.columns.names = [0]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "keys, agg_index",\n [\n (["a"], Index([1], name="a")),\n (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),\n ],\n)\n@pytest.mark.parametrize("input", [True, 1, 1.0])\n@pytest.mark.parametrize("dtype", [bool, int, float])\n@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])\ndef test_callable_result_dtype_series(keys, agg_index, input, dtype, method):\n # GH 21240\n df = DataFrame({"a": [1], "b": [2], "c": [input]})\n op = getattr(df.groupby(keys)["c"], method)\n result = op(lambda x: x.astype(dtype).iloc[0])\n expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index\n expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_order_aggregate_multiple_funcs():\n # GH 25692\n df = DataFrame({"A": [1, 1, 2, 2], 
"B": [1, 2, 3, 4]})\n\n res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])\n result = res.columns.levels[1]\n\n expected = Index(["sum", "max", "mean", "ohlc", "min"])\n\n tm.assert_index_equal(result, expected)\n\n\ndef test_ohlc_ea_dtypes(any_numeric_ea_dtype):\n # GH#37493\n df = DataFrame(\n {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]},\n dtype=any_numeric_ea_dtype,\n )\n gb = df.groupby("a")\n result = gb.ohlc()\n expected = DataFrame(\n [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4],\n columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]),\n index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"),\n dtype=any_numeric_ea_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n gb2 = df.groupby("a", as_index=False)\n result2 = gb2.ohlc()\n expected2 = expected.reset_index()\n tm.assert_frame_equal(result2, expected2)\n\n\n@pytest.mark.parametrize("dtype", [np.int64, np.uint64])\n@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])\ndef test_uint64_type_handling(dtype, how):\n # GH 26310\n df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})\n expected = df.groupby("y").agg({"x": how})\n df.x = df.x.astype(dtype)\n result = df.groupby("y").agg({"x": how})\n if how not in ("mean", "median"):\n # mean and median always result in floats\n result.x = result.x.astype(np.int64)\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n\ndef test_func_duplicates_raises():\n # GH28426\n msg = "Function names"\n df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})\n with pytest.raises(SpecificationError, match=msg):\n df.groupby("A").agg(["min", "min"])\n\n\n@pytest.mark.parametrize(\n "index",\n [\n pd.CategoricalIndex(list("abc")),\n pd.interval_range(0, 3),\n pd.period_range("2020", periods=3, freq="D"),\n MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),\n ],\n)\ndef test_agg_index_has_complex_internals(index):\n # GH 31223\n df = DataFrame({"group": [1, 1, 
2], "value": [0, 1, 0]}, index=index)\n result = df.groupby("group").agg({"value": Series.nunique})\n expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_split_block():\n # https://github.com/pandas-dev/pandas/issues/31522\n df = DataFrame(\n {\n "key1": ["a", "a", "b", "b", "a"],\n "key2": ["one", "two", "one", "two", "one"],\n "key3": ["three", "three", "three", "six", "six"],\n }\n )\n result = df.groupby("key1").min()\n expected = DataFrame(\n {"key2": ["one", "one"], "key3": ["six", "six"]},\n index=Index(["a", "b"], name="key1"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_split_object_part_datetime():\n # https://github.com/pandas-dev/pandas/pull/31616\n df = DataFrame(\n {\n "A": pd.date_range("2000", periods=4),\n "B": ["a", "b", "c", "d"],\n "C": [1, 2, 3, 4],\n "D": ["b", "c", "d", "e"],\n "E": pd.date_range("2000", periods=4),\n "F": [1, 2, 3, 4],\n }\n ).astype(object)\n result = df.groupby([0, 0, 0, 0]).min()\n expected = DataFrame(\n {\n "A": [pd.Timestamp("2000")],\n "B": ["a"],\n "C": [1],\n "D": ["b"],\n "E": [pd.Timestamp("2000")],\n "F": [1],\n },\n index=np.array([0]),\n dtype=object,\n )\n tm.assert_frame_equal(result, expected)\n\n\nclass TestNamedAggregationSeries:\n def test_series_named_agg(self):\n df = Series([1, 2, 3, 4])\n gr = df.groupby([0, 0, 1, 1])\n result = gr.agg(a="sum", b="min")\n expected = DataFrame(\n {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=np.array([0, 1])\n )\n tm.assert_frame_equal(result, expected)\n\n result = gr.agg(b="min", a="sum")\n expected = expected[["b", "a"]]\n tm.assert_frame_equal(result, expected)\n\n def test_no_args_raises(self):\n gr = Series([1, 2]).groupby([0, 1])\n with pytest.raises(TypeError, match="Must provide"):\n gr.agg()\n\n # but we do allow this\n result = gr.agg([])\n expected = DataFrame(columns=[])\n tm.assert_frame_equal(result, expected)\n\n def 
test_series_named_agg_duplicates_no_raises(self):\n # GH28426\n gr = Series([1, 2, 3]).groupby([0, 0, 1])\n grouped = gr.agg(a="sum", b="sum")\n expected = DataFrame({"a": [3, 3], "b": [3, 3]}, index=np.array([0, 1]))\n tm.assert_frame_equal(expected, grouped)\n\n def test_mangled(self):\n gr = Series([1, 2, 3]).groupby([0, 0, 1])\n result = gr.agg(a=lambda x: 0, b=lambda x: 1)\n expected = DataFrame({"a": [0, 0], "b": [1, 1]}, index=np.array([0, 1]))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "inp",\n [\n pd.NamedAgg(column="anything", aggfunc="min"),\n ("anything", "min"),\n ["anything", "min"],\n ],\n )\n def test_named_agg_nametuple(self, inp):\n # GH34422\n s = Series([1, 1, 2, 2, 3, 3, 4, 5])\n msg = f"func is expected but received {type(inp).__name__}"\n with pytest.raises(TypeError, match=msg):\n s.groupby(s.values).agg(a=inp)\n\n\nclass TestNamedAggregationDataFrame:\n def test_agg_relabel(self):\n df = DataFrame(\n {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}\n )\n result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))\n expected = DataFrame(\n {"a_max": [1, 3], "b_max": [6, 8]},\n index=Index(["a", "b"], name="group"),\n columns=["a_max", "b_max"],\n )\n tm.assert_frame_equal(result, expected)\n\n # order invariance\n p98 = functools.partial(np.percentile, q=98)\n result = df.groupby("group").agg(\n b_min=("B", "min"),\n a_min=("A", "min"),\n a_mean=("A", "mean"),\n a_max=("A", "max"),\n b_max=("B", "max"),\n a_98=("A", p98),\n )\n expected = DataFrame(\n {\n "b_min": [5, 7],\n "a_min": [0, 2],\n "a_mean": [0.5, 2.5],\n "a_max": [1, 3],\n "b_max": [6, 8],\n "a_98": [0.98, 2.98],\n },\n index=Index(["a", "b"], name="group"),\n columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_agg_relabel_non_identifier(self):\n df = DataFrame(\n {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}\n )\n\n 
result = df.groupby("group").agg(**{"my col": ("A", "max")})\n expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_no_raises(self):\n # GH 28426, if use same input function on same column,\n # no error should raise\n df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})\n\n grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))\n expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))\n tm.assert_frame_equal(grouped, expected)\n\n quant50 = functools.partial(np.percentile, q=50)\n quant70 = functools.partial(np.percentile, q=70)\n quant50.__name__ = "quant50"\n quant70.__name__ = "quant70"\n\n test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})\n\n grouped = test.groupby("col1").agg(\n quantile_50=("col2", quant50), quantile_70=("col2", quant70)\n )\n expected = DataFrame(\n {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},\n index=Index(["a", "b"], name="col1"),\n )\n tm.assert_frame_equal(grouped, expected)\n\n def test_agg_relabel_with_level(self):\n df = DataFrame(\n {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},\n index=MultiIndex.from_product([["A", "B"], ["a", "b"]]),\n )\n result = df.groupby(level=0).agg(\n aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")\n )\n expected = DataFrame(\n {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]\n )\n tm.assert_frame_equal(result, expected)\n\n def test_agg_relabel_other_raises(self):\n df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})\n grouped = df.groupby("A")\n match = "Must provide"\n with pytest.raises(TypeError, match=match):\n grouped.agg(foo=1)\n\n with pytest.raises(TypeError, match=match):\n grouped.agg()\n\n with pytest.raises(TypeError, match=match):\n grouped.agg(a=("B", "max"), b=(1, 2, 3))\n\n def test_missing_raises(self):\n df = DataFrame({"A": [0, 1], "B": [1, 2]})\n match = re.escape("Column(s) ['C'] do not exist")\n with 
pytest.raises(KeyError, match=match):\n df.groupby("A").agg(c=("C", "sum"))\n\n def test_agg_namedtuple(self):\n df = DataFrame({"A": [0, 1], "B": [1, 2]})\n result = df.groupby("A").agg(\n b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")\n )\n expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))\n tm.assert_frame_equal(result, expected)\n\n def test_mangled(self):\n df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})\n result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))\n expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",\n [\n (\n (("y", "A"), "max"),\n (("y", "A"), np.mean),\n (("y", "B"), "mean"),\n [1, 3],\n [0.5, 2.5],\n [5.5, 7.5],\n ),\n (\n (("y", "A"), lambda x: max(x)),\n (("y", "A"), lambda x: 1),\n (("y", "B"), np.mean),\n [1, 3],\n [1, 1],\n [5.5, 7.5],\n ),\n (\n pd.NamedAgg(("y", "A"), "max"),\n pd.NamedAgg(("y", "B"), np.mean),\n pd.NamedAgg(("y", "A"), lambda x: 1),\n [1, 3],\n [5.5, 7.5],\n [1, 1],\n ),\n ],\n)\ndef test_agg_relabel_multiindex_column(\n agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3\n):\n # GH 29422, add tests for multiindex column cases\n df = DataFrame(\n {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}\n )\n df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])\n idx = Index(["a", "b"], name=("x", "group"))\n\n result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))\n expected = DataFrame({"a_max": [1, 3]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n msg = "is currently using SeriesGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby(("x", "group")).agg(\n col_1=agg_col1, col_2=agg_col2, col_3=agg_col3\n )\n expected = DataFrame(\n {"col_1": agg_result1, "col_2": 
agg_result2, "col_3": agg_result3}, index=idx\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_relabel_multiindex_raises_not_exist():\n # GH 29422, add test for raises scenario when aggregate column does not exist\n df = DataFrame(\n {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}\n )\n df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])\n\n with pytest.raises(KeyError, match="do not exist"):\n df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))\n\n\ndef test_agg_relabel_multiindex_duplicates():\n # GH29422, add test for raises scenario when getting duplicates\n # GH28426, after this change, duplicates should also work if the relabelling is\n # different\n df = DataFrame(\n {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}\n )\n df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])\n\n result = df.groupby(("x", "group")).agg(\n a=(("y", "A"), "min"), b=(("y", "A"), "min")\n )\n idx = Index(["a", "b"], name=("x", "group"))\n expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])\ndef test_groupby_aggregate_empty_key(kwargs):\n # GH: 32580\n df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})\n result = df.groupby("a").agg(kwargs)\n expected = DataFrame(\n [1, 4],\n index=Index([1, 2], dtype="int64", name="a"),\n columns=MultiIndex.from_tuples([["c", "min"]]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregate_empty_key_empty_return():\n # GH: 32580 Check if everything works, when return is empty\n df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})\n result = df.groupby("a").agg({"b": []})\n expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregate_empty_with_multiindex_frame():\n # GH 
39178\n df = DataFrame(columns=["a", "b", "c"])\n result = df.groupby(["a", "b"], group_keys=False).agg(d=("c", list))\n expected = DataFrame(\n columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_grouby_agg_loses_results_with_as_index_false_relabel():\n # GH 32240: When the aggregate function relabels column names and\n # as_index=False is specified, the results are dropped.\n\n df = DataFrame(\n {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}\n )\n\n grouped = df.groupby("key", as_index=False)\n result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))\n expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():\n # GH 32240: When the aggregate function relabels column names and\n # as_index=False is specified, the results are dropped. 
Check if\n # multiindex is returned in the right order\n\n df = DataFrame(\n {\n "key": ["x", "y", "x", "y", "x", "x"],\n "key1": ["a", "b", "c", "b", "a", "c"],\n "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],\n }\n )\n\n grouped = df.groupby(["key", "key1"], as_index=False)\n result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))\n expected = DataFrame(\n {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]\n)\ndef test_multiindex_custom_func(func):\n # GH 31777\n data = [[1, 4, 2], [5, 7, 1]]\n df = DataFrame(\n data,\n columns=MultiIndex.from_arrays(\n [[1, 1, 2], [3, 4, 3]], names=["Sisko", "Janeway"]\n ),\n )\n result = df.groupby(np.array([0, 1])).agg(func)\n expected_dict = {\n (1, 3): {0: 1.0, 1: 5.0},\n (1, 4): {0: 4.0, 1: 7.0},\n (2, 3): {0: 2.0, 1: 1.0},\n }\n expected = DataFrame(expected_dict, index=np.array([0, 1]), columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n\ndef myfunc(s):\n return np.percentile(s, q=0.90)\n\n\n@pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])\ndef test_lambda_named_agg(func):\n # see gh-28467\n animals = DataFrame(\n {\n "kind": ["cat", "dog", "cat", "dog"],\n "height": [9.1, 6.0, 9.5, 34.0],\n "weight": [7.9, 7.5, 9.9, 198.0],\n }\n )\n\n result = animals.groupby("kind").agg(\n mean_height=("height", "mean"), perc90=("height", func)\n )\n expected = DataFrame(\n [[9.3, 9.1036], [20.0, 6.252]],\n columns=["mean_height", "perc90"],\n index=Index(["cat", "dog"], name="kind"),\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_mixed_types():\n # GH 16916\n df = DataFrame(\n data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")\n )\n df["grouping"] = ["group 1", "group 1", 2]\n result = df.groupby("grouping").aggregate(lambda x: x.tolist())\n 
expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]\n expected = DataFrame(\n expected_data,\n index=Index([2, "group 1"], dtype="object", name="grouping"),\n columns=Index(["X", "Y", "Z"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.xfail(reason="Not implemented;see GH 31256")\ndef test_aggregate_udf_na_extension_type():\n # https://github.com/pandas-dev/pandas/pull/31359\n # This is currently failing to cast back to Int64Dtype.\n # The presence of the NA causes two problems\n # 1. NA is not an instance of Int64Dtype.type (numpy.int64)\n # 2. The presence of an NA forces object type, so the non-NA values is\n # a Python int rather than a NumPy int64. Python ints aren't\n # instances of numpy.int64.\n def aggfunc(x):\n if all(x > 2):\n return 1\n else:\n return pd.NA\n\n df = DataFrame({"A": pd.array([1, 2, 3])})\n result = df.groupby([1, 1, 2]).agg(aggfunc)\n expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])\n tm.assert_frame_equal(result, expected)\n\n\nclass TestLambdaMangling:\n def test_basic(self):\n df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})\n result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})\n\n expected = DataFrame(\n {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},\n index=Index([0, 1], name="A"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_mangle_series_groupby(self):\n gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])\n result = gr.agg([lambda x: 0, lambda x: 1])\n exp_data = {"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}\n expected = DataFrame(exp_data, index=np.array([0, 1]))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.xfail(reason="GH-26611. 
kwargs for multi-agg.")\n def test_with_kwargs(self):\n f1 = lambda x, y, b=1: x.sum() + y + b\n f2 = lambda x, y, b=2: x.sum() + y * b\n result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)\n expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})\n tm.assert_frame_equal(result, expected)\n\n result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)\n expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})\n tm.assert_frame_equal(result, expected)\n\n def test_agg_with_one_lambda(self):\n # GH 25719, write tests for DataFrameGroupby.agg with only one lambda\n df = DataFrame(\n {\n "kind": ["cat", "dog", "cat", "dog"],\n "height": [9.1, 6.0, 9.5, 34.0],\n "weight": [7.9, 7.5, 9.9, 198.0],\n }\n )\n\n columns = ["height_sqr_min", "height_max", "weight_max"]\n expected = DataFrame(\n {\n "height_sqr_min": [82.81, 36.00],\n "height_max": [9.5, 34.0],\n "weight_max": [9.9, 198.0],\n },\n index=Index(["cat", "dog"], name="kind"),\n columns=columns,\n )\n\n # check pd.NameAgg case\n result1 = df.groupby(by="kind").agg(\n height_sqr_min=pd.NamedAgg(\n column="height", aggfunc=lambda x: np.min(x**2)\n ),\n height_max=pd.NamedAgg(column="height", aggfunc="max"),\n weight_max=pd.NamedAgg(column="weight", aggfunc="max"),\n )\n tm.assert_frame_equal(result1, expected)\n\n # check agg(key=(col, aggfunc)) case\n result2 = df.groupby(by="kind").agg(\n height_sqr_min=("height", lambda x: np.min(x**2)),\n height_max=("height", "max"),\n weight_max=("weight", "max"),\n )\n tm.assert_frame_equal(result2, expected)\n\n def test_agg_multiple_lambda(self):\n # GH25719, test for DataFrameGroupby.agg with multiple lambdas\n # with mixed aggfunc\n df = DataFrame(\n {\n "kind": ["cat", "dog", "cat", "dog"],\n "height": [9.1, 6.0, 9.5, 34.0],\n "weight": [7.9, 7.5, 9.9, 198.0],\n }\n )\n columns = [\n "height_sqr_min",\n "height_max",\n "weight_max",\n "height_max_2",\n "weight_min",\n ]\n expected = DataFrame(\n {\n "height_sqr_min": [82.81, 36.00],\n "height_max": 
[9.5, 34.0],\n "weight_max": [9.9, 198.0],\n "height_max_2": [9.5, 34.0],\n "weight_min": [7.9, 7.5],\n },\n index=Index(["cat", "dog"], name="kind"),\n columns=columns,\n )\n\n # check agg(key=(col, aggfunc)) case\n result1 = df.groupby(by="kind").agg(\n height_sqr_min=("height", lambda x: np.min(x**2)),\n height_max=("height", "max"),\n weight_max=("weight", "max"),\n height_max_2=("height", lambda x: np.max(x)),\n weight_min=("weight", lambda x: np.min(x)),\n )\n tm.assert_frame_equal(result1, expected)\n\n # check pd.NamedAgg case\n result2 = df.groupby(by="kind").agg(\n height_sqr_min=pd.NamedAgg(\n column="height", aggfunc=lambda x: np.min(x**2)\n ),\n height_max=pd.NamedAgg(column="height", aggfunc="max"),\n weight_max=pd.NamedAgg(column="weight", aggfunc="max"),\n height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),\n weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),\n )\n tm.assert_frame_equal(result2, expected)\n\n\ndef test_groupby_get_by_index():\n # GH 33439\n df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})\n res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})\n expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")\n tm.assert_frame_equal(res, expected)\n\n\n@pytest.mark.parametrize(\n "grp_col_dict, exp_data",\n [\n ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),\n ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),\n ({"nr": "min"}, {"nr": [1, 5]}),\n ],\n)\ndef test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):\n # test single aggregations on ordered categorical cols GHGH27800\n\n # create the result dataframe\n input_df = DataFrame(\n {\n "nr": [1, 2, 3, 4, 5, 6, 7, 8],\n "cat_ord": list("aabbccdd"),\n "cat": list("aaaabbbb"),\n }\n )\n\n input_df = input_df.astype({"cat": "category", "cat_ord": "category"})\n input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()\n result_df = input_df.groupby("cat", 
observed=False).agg(grp_col_dict)\n\n # create expected dataframe\n cat_index = pd.CategoricalIndex(\n ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"\n )\n\n expected_df = DataFrame(data=exp_data, index=cat_index)\n\n if "cat_ord" in expected_df:\n # ordered categorical columns should be preserved\n dtype = input_df["cat_ord"].dtype\n expected_df["cat_ord"] = expected_df["cat_ord"].astype(dtype)\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\n@pytest.mark.parametrize(\n "grp_col_dict, exp_data",\n [\n ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),\n ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),\n ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),\n ],\n)\ndef test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):\n # test combined aggregations on ordered categorical cols GH27800\n\n # create the result dataframe\n input_df = DataFrame(\n {\n "nr": [1, 2, 3, 4, 5, 6, 7, 8],\n "cat_ord": list("aabbccdd"),\n "cat": list("aaaabbbb"),\n }\n )\n\n input_df = input_df.astype({"cat": "category", "cat_ord": "category"})\n input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()\n result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)\n\n # create expected dataframe\n cat_index = pd.CategoricalIndex(\n ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"\n )\n\n # unpack the grp_col_dict to create the multi-index tuple\n # this tuple will be used to create the expected dataframe index\n multi_index_list = []\n for k, v in grp_col_dict.items():\n if isinstance(v, list):\n multi_index_list.extend([k, value] for value in v)\n else:\n multi_index_list.append([k, v])\n multi_index = MultiIndex.from_tuples(tuple(multi_index_list))\n\n expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)\n for col in expected_df.columns:\n if isinstance(col, tuple) and "cat_ord" in col:\n # ordered categorical should be 
preserved\n expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\ndef test_nonagg_agg():\n # GH 35490 - Single/Multiple agg of non-agg function give same results\n # TODO: agg should raise for functions that don't aggregate\n df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})\n g = df.groupby("a")\n\n result = g.agg(["cumsum"])\n result.columns = result.columns.droplevel(-1)\n expected = g.agg("cumsum")\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_datetime_objects():\n # https://github.com/pandas-dev/pandas/issues/36003\n # ensure we don't raise an error but keep object dtype for out-of-bounds\n # datetimes\n df = DataFrame(\n {\n "A": ["X", "Y"],\n "B": [\n datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),\n datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),\n ],\n }\n )\n result = df.groupby("A").B.max()\n expected = df.set_index("A")["B"]\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_index_object_dtype():\n # GH 40014\n df = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]})\n df.index = df.index.astype("O")\n grouped = df.groupby(["c0", "c1"])\n res = grouped.p.agg(lambda x: all(x > 0))\n # Check that providing a user-defined function in agg()\n # produces the correct index shape when using an object-typed index.\n expected_index = MultiIndex.from_tuples(\n [("x", "x"), ("x", "y")], names=("c0", "c1")\n )\n expected = Series([False, True], index=expected_index, name="p")\n tm.assert_series_equal(res, expected)\n\n\ndef test_timeseries_groupby_agg():\n # GH#43290\n\n def func(ser):\n if ser.isna().all():\n return None\n return np.sum(ser)\n\n df = DataFrame([1.0], index=[pd.Timestamp("2018-01-16 00:00:00+00:00")])\n res = df.groupby(lambda x: 1).agg(func)\n\n expected = DataFrame([[1.0]], index=[1])\n tm.assert_frame_equal(res, expected)\n\n\ndef test_groupby_agg_precision(any_real_numeric_dtype):\n if 
any_real_numeric_dtype in tm.ALL_INT_NUMPY_DTYPES:\n max_value = np.iinfo(any_real_numeric_dtype).max\n if any_real_numeric_dtype in tm.FLOAT_NUMPY_DTYPES:\n max_value = np.finfo(any_real_numeric_dtype).max\n if any_real_numeric_dtype in tm.FLOAT_EA_DTYPES:\n max_value = np.finfo(any_real_numeric_dtype.lower()).max\n if any_real_numeric_dtype in tm.ALL_INT_EA_DTYPES:\n max_value = np.iinfo(any_real_numeric_dtype.lower()).max\n\n df = DataFrame(\n {\n "key1": ["a"],\n "key2": ["b"],\n "key3": pd.array([max_value], dtype=any_real_numeric_dtype),\n }\n )\n arrays = [["a"], ["b"]]\n index = MultiIndex.from_arrays(arrays, names=("key1", "key2"))\n\n expected = DataFrame(\n {"key3": pd.array([max_value], dtype=any_real_numeric_dtype)}, index=index\n )\n result = df.groupby(["key1", "key2"]).agg(lambda x: x)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregate_directory(reduction_func):\n # GH#32793\n if reduction_func in ["corrwith", "nth"]:\n return None\n\n obj = DataFrame([[0, 1], [0, np.nan]])\n\n result_reduced_series = obj.groupby(0).agg(reduction_func)\n result_reduced_frame = obj.groupby(0).agg({1: reduction_func})\n\n if reduction_func in ["size", "ngroup"]:\n # names are different: None / 1\n tm.assert_series_equal(\n result_reduced_series, result_reduced_frame[1], check_names=False\n )\n else:\n tm.assert_frame_equal(result_reduced_series, result_reduced_frame)\n tm.assert_series_equal(\n result_reduced_series.dtypes, result_reduced_frame.dtypes\n )\n\n\ndef test_group_mean_timedelta_nat():\n # GH43132\n data = Series(["1 day", "3 days", "NaT"], dtype="timedelta64[ns]")\n expected = Series(["2 days"], dtype="timedelta64[ns]", index=np.array([0]))\n\n result = data.groupby([0, 0, 0]).mean()\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "input_data, expected_output",\n [\n ( # no timezone\n ["2021-01-01T00:00", "NaT", "2021-01-01T02:00"],\n ["2021-01-01T01:00"],\n ),\n ( # timezone\n ["2021-01-01T00:00-0100", 
"NaT", "2021-01-01T02:00-0100"],\n ["2021-01-01T01:00-0100"],\n ),\n ],\n)\ndef test_group_mean_datetime64_nat(input_data, expected_output):\n # GH43132\n data = to_datetime(Series(input_data))\n expected = to_datetime(Series(expected_output, index=np.array([0])))\n\n result = data.groupby([0, 0, 0]).mean()\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "func, output", [("mean", [8 + 18j, 10 + 22j]), ("sum", [40 + 90j, 50 + 110j])]\n)\ndef test_groupby_complex(func, output):\n # GH#43701\n data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))\n result = data.groupby(data.index % 2).agg(func)\n expected = Series(output)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("func", ["min", "max", "var"])\ndef test_groupby_complex_raises(func):\n # GH#43701\n data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))\n msg = "No matching signature found"\n with pytest.raises(TypeError, match=msg):\n data.groupby(data.index % 2).agg(func)\n\n\n@pytest.mark.parametrize(\n "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}]\n)\ndef test_multi_axis_1_raises(func):\n # GH#46995\n df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]})\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby("a", axis=1)\n with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"):\n gb.agg(func)\n\n\n@pytest.mark.parametrize(\n "test, constant",\n [\n ([[20, "A"], [20, "B"], [10, "C"]], {0: [10, 20], 1: ["C", ["A", "B"]]}),\n ([[20, "A"], [20, "B"], [30, "C"]], {0: [20, 30], 1: [["A", "B"], "C"]}),\n ([["a", 1], ["a", 1], ["b", 2], ["b", 3]], {0: ["a", "b"], 1: [1, [2, 3]]}),\n pytest.param(\n [["a", 1], ["a", 2], ["b", 3], ["b", 3]],\n {0: ["a", "b"], 1: [[1, 2], 3]},\n marks=pytest.mark.xfail,\n ),\n ],\n)\ndef test_agg_of_mode_list(test, constant):\n # GH#25581\n df1 = DataFrame(test)\n result = 
df1.groupby(0).agg(Series.mode)\n # Mode usually only returns 1 value, but can return a list in the case of a tie.\n\n expected = DataFrame(constant)\n expected = expected.set_index(0)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dataframe_groupy_agg_list_like_func_with_args():\n # GH#50624\n df = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})\n gb = df.groupby("y")\n\n def foo1(x, a=1, c=0):\n return x.sum() + a + c\n\n def foo2(x, b=2, c=0):\n return x.sum() + b + c\n\n msg = r"foo1\(\) got an unexpected keyword argument 'b'"\n with pytest.raises(TypeError, match=msg):\n gb.agg([foo1, foo2], 3, b=3, c=4)\n\n result = gb.agg([foo1, foo2], 3, c=4)\n expected = DataFrame(\n [[8, 8], [9, 9], [10, 10]],\n index=Index(["a", "b", "c"], name="y"),\n columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_series_groupy_agg_list_like_func_with_args():\n # GH#50624\n s = Series([1, 2, 3])\n sgb = s.groupby(s)\n\n def foo1(x, a=1, c=0):\n return x.sum() + a + c\n\n def foo2(x, b=2, c=0):\n return x.sum() + b + c\n\n msg = r"foo1\(\) got an unexpected keyword argument 'b'"\n with pytest.raises(TypeError, match=msg):\n sgb.agg([foo1, foo2], 3, b=3, c=4)\n\n result = sgb.agg([foo1, foo2], 3, c=4)\n expected = DataFrame(\n [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_groupings_selection():\n # GH#51186 - a selected grouping should be in the output of agg\n df = DataFrame({"a": [1, 1, 2], "b": [3, 3, 4], "c": [5, 6, 7]})\n gb = df.groupby(["a", "b"])\n selected_gb = gb[["b", "c"]]\n result = selected_gb.agg(lambda x: x.sum())\n index = MultiIndex(\n levels=[[1, 2], [3, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]\n )\n expected = DataFrame({"b": [6, 4], "c": [11, 7]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_multiple_with_as_index_false_subset_to_a_single_column():\n # 
GH#50724\n df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})\n gb = df.groupby("a", as_index=False)["b"]\n result = gb.agg(["sum", "mean"])\n expected = DataFrame({"a": [1, 2], "sum": [7, 5], "mean": [3.5, 5.0]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_with_as_index_false_with_list():\n # GH#52849\n df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})\n gb = df.groupby(by=["a1", "a2"], as_index=False)\n result = gb.agg(["sum"])\n\n expected = DataFrame(\n data=[[0, 2, 4], [0, 3, 5], [1, 3, 6]],\n columns=MultiIndex.from_tuples([("a1", ""), ("a2", ""), ("b", "sum")]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_agg_extension_timedelta_cumsum_with_named_aggregation():\n # GH#41720\n expected = DataFrame(\n {\n "td": {\n 0: pd.Timedelta("0 days 01:00:00"),\n 1: pd.Timedelta("0 days 01:15:00"),\n 2: pd.Timedelta("0 days 01:15:00"),\n }\n }\n )\n df = DataFrame(\n {\n "td": Series(\n ["0 days 01:00:00", "0 days 00:15:00", "0 days 01:15:00"],\n dtype="timedelta64[ns]",\n ),\n "grps": ["a", "a", "b"],\n }\n )\n gb = df.groupby("grps")\n result = gb.agg(td=("td", "cumsum"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregation_empty_group():\n # https://github.com/pandas-dev/pandas/issues/18869\n def func(x):\n if len(x) == 0:\n raise ValueError("length must not be 0")\n return len(x)\n\n df = DataFrame(\n {"A": pd.Categorical(["a", "a"], categories=["a", "b", "c"]), "B": [1, 1]}\n )\n msg = "length must not be 0"\n with pytest.raises(ValueError, match=msg):\n df.groupby("A", observed=False).agg(func)\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\test_aggregate.py | test_aggregate.py | Python | 55,550 | 0.75 | 0.087321 | 0.084477 | node-utils | 475 | 2023-09-17T15:40:52.468195 | Apache-2.0 | true | 45c0f3dab2af9724c2a891c9d84024ba |
"""\ntest cython .agg behavior\n"""\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import (\n is_float_dtype,\n is_integer_dtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n NaT,\n Series,\n Timedelta,\n Timestamp,\n bdate_range,\n)\nimport pandas._testing as tm\nimport pandas.core.common as com\n\n\n@pytest.mark.parametrize(\n "op_name",\n [\n "count",\n "sum",\n "std",\n "var",\n "sem",\n "mean",\n pytest.param(\n "median",\n # ignore mean of empty slice\n # and all-NaN\n marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],\n ),\n "prod",\n "min",\n "max",\n ],\n)\ndef test_cythonized_aggers(op_name):\n data = {\n "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],\n "B": ["A", "B"] * 6,\n "C": np.random.default_rng(2).standard_normal(12),\n }\n df = DataFrame(data)\n df.loc[2:10:2, "C"] = np.nan\n\n op = lambda x: getattr(x, op_name)()\n\n # single column\n grouped = df.drop(["B"], axis=1).groupby("A")\n exp = {cat: op(group["C"]) for cat, group in grouped}\n exp = DataFrame({"C": exp})\n exp.index.name = "A"\n result = op(grouped)\n tm.assert_frame_equal(result, exp)\n\n # multiple columns\n grouped = df.groupby(["A", "B"])\n expd = {}\n for (cat1, cat2), group in grouped:\n expd.setdefault(cat1, {})[cat2] = op(group["C"])\n exp = DataFrame(expd).T.stack(future_stack=True)\n exp.index.names = ["A", "B"]\n exp.name = "C"\n\n result = op(grouped)["C"]\n if op_name in ["sum", "prod"]:\n tm.assert_series_equal(result, exp)\n\n\ndef test_cython_agg_boolean():\n frame = DataFrame(\n {\n "a": np.random.default_rng(2).integers(0, 5, 50),\n "b": np.random.default_rng(2).integers(0, 2, 50).astype("bool"),\n }\n )\n result = frame.groupby("a")["b"].mean()\n msg = "using SeriesGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n expected = frame.groupby("a")["b"].agg(np.mean)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_cython_agg_nothing_to_agg():\n frame = 
DataFrame(\n {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}\n )\n\n msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"\n with pytest.raises(TypeError, match=msg):\n frame.groupby("a")["b"].mean(numeric_only=True)\n\n frame = DataFrame(\n {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}\n )\n\n result = frame[["b"]].groupby(frame["a"]).mean(numeric_only=True)\n expected = DataFrame(\n [],\n index=frame["a"].sort_values().drop_duplicates(),\n columns=Index([], dtype="str"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_cython_agg_nothing_to_agg_with_dates():\n frame = DataFrame(\n {\n "a": np.random.default_rng(2).integers(0, 5, 50),\n "b": ["foo", "bar"] * 25,\n "dates": pd.date_range("now", periods=50, freq="min"),\n }\n )\n msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"\n with pytest.raises(TypeError, match=msg):\n frame.groupby("b").dates.mean(numeric_only=True)\n\n\ndef test_cython_agg_frame_columns():\n # #2113\n df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.groupby(level=0, axis="columns").mean()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.groupby(level=0, axis="columns").mean()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.groupby(level=0, axis="columns").mean()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.groupby(level=0, axis="columns").mean()\n\n\ndef test_cython_agg_return_dict():\n # GH 16741\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n }\n )\n\n ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())\n 
expected = Series(\n [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],\n index=Index(["bar", "foo"], name="A"),\n name="B",\n )\n tm.assert_series_equal(ts, expected)\n\n\ndef test_cython_fail_agg():\n dr = bdate_range("1/1/2000", periods=50)\n ts = Series(["A", "B", "C", "D", "E"] * 10, dtype=object, index=dr)\n\n grouped = ts.groupby(lambda x: x.month)\n summed = grouped.sum()\n msg = "using SeriesGroupBy.sum"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#53425\n expected = grouped.agg(np.sum).astype(object)\n tm.assert_series_equal(summed, expected)\n\n\n@pytest.mark.parametrize(\n "op, targop",\n [\n ("mean", np.mean),\n ("median", np.median),\n ("var", np.var),\n ("sum", np.sum),\n ("prod", np.prod),\n ("min", np.min),\n ("max", np.max),\n ("first", lambda x: x.iloc[0]),\n ("last", lambda x: x.iloc[-1]),\n ],\n)\ndef test__cython_agg_general(op, targop):\n df = DataFrame(np.random.default_rng(2).standard_normal(1000))\n labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)\n\n result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True)\n warn = FutureWarning if targop in com._cython_table else None\n msg = f"using DataFrameGroupBy.{op}"\n with tm.assert_produces_warning(warn, match=msg):\n # GH#53425\n expected = df.groupby(labels).agg(targop)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op, targop",\n [\n ("mean", np.mean),\n ("median", lambda x: np.median(x) if len(x) > 0 else np.nan),\n ("var", lambda x: np.var(x, ddof=1)),\n ("min", np.min),\n ("max", np.max),\n ],\n)\ndef test_cython_agg_empty_buckets(op, targop, observed):\n df = DataFrame([11, 12, 13])\n grps = range(0, 55, 5)\n\n # calling _cython_agg_general directly, instead of via the user API\n # which sets different values for min_count, so do that here.\n g = df.groupby(pd.cut(df[0], grps), observed=observed)\n result = g._cython_agg_general(op, alt=None, numeric_only=True)\n\n g = 
df.groupby(pd.cut(df[0], grps), observed=observed)\n expected = g.agg(lambda x: targop(x))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_cython_agg_empty_buckets_nanops(observed):\n # GH-18869 can't call nanops on empty groups, so hardcode expected\n # for these\n df = DataFrame([11, 12, 13], columns=["a"])\n grps = np.arange(0, 25, 5, dtype=int)\n # add / sum\n result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(\n "sum", alt=None, numeric_only=True\n )\n intervals = pd.interval_range(0, 20, freq=5)\n expected = DataFrame(\n {"a": [0, 0, 36, 0]},\n index=pd.CategoricalIndex(intervals, name="a", ordered=True),\n )\n if observed:\n expected = expected[expected.a != 0]\n\n tm.assert_frame_equal(result, expected)\n\n # prod\n result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(\n "prod", alt=None, numeric_only=True\n )\n expected = DataFrame(\n {"a": [1, 1, 1716, 1]},\n index=pd.CategoricalIndex(intervals, name="a", ordered=True),\n )\n if observed:\n expected = expected[expected.a != 1]\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("op", ["first", "last", "max", "min"])\n@pytest.mark.parametrize(\n "data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]\n)\ndef test_cython_with_timestamp_and_nat(op, data):\n # https://github.com/pandas-dev/pandas/issues/19526\n df = DataFrame({"a": [0, 1], "b": [data, NaT]})\n index = Index([0, 1], name="a")\n\n # We will group by a and test the cython aggregations\n expected = DataFrame({"b": [data, NaT]}, index=index)\n\n result = df.groupby("a").aggregate(op)\n tm.assert_frame_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n "agg",\n [\n "min",\n "max",\n "count",\n "sum",\n "prod",\n "var",\n "mean",\n "median",\n "ohlc",\n "cumprod",\n "cumsum",\n "shift",\n "any",\n "all",\n "quantile",\n "first",\n "last",\n "rank",\n "cummin",\n "cummax",\n ],\n)\ndef test_read_only_buffer_source_agg(agg):\n # 
https://github.com/pandas-dev/pandas/issues/36014\n df = DataFrame(\n {\n "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0],\n "species": ["setosa", "setosa", "setosa", "setosa", "setosa"],\n }\n )\n df._mgr.arrays[0].flags.writeable = False\n\n result = df.groupby(["species"]).agg({"sepal_length": agg})\n expected = df.copy().groupby(["species"]).agg({"sepal_length": agg})\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op_name",\n [\n "count",\n "sum",\n "std",\n "var",\n "sem",\n "mean",\n "median",\n "prod",\n "min",\n "max",\n ],\n)\ndef test_cython_agg_nullable_int(op_name):\n # ensure that the cython-based aggregations don't fail for nullable dtype\n # (eg https://github.com/pandas-dev/pandas/issues/37415)\n df = DataFrame(\n {\n "A": ["A", "B"] * 5,\n "B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"),\n }\n )\n result = getattr(df.groupby("A")["B"], op_name)()\n df2 = df.assign(B=df["B"].astype("float64"))\n expected = getattr(df2.groupby("A")["B"], op_name)()\n if op_name in ("mean", "median"):\n convert_integer = False\n else:\n convert_integer = True\n expected = expected.convert_dtypes(convert_integer=convert_integer)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])\ndef test_count_masked_returns_masked_dtype(dtype):\n df = DataFrame(\n {\n "A": [1, 1],\n "B": pd.array([1, pd.NA], dtype=dtype),\n "C": pd.array([1, 1], dtype=dtype),\n }\n )\n result = df.groupby("A").count()\n expected = DataFrame(\n [[1, 2]], index=Index([1], name="A"), columns=["B", "C"], dtype="Int64"\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("with_na", [True, False])\n@pytest.mark.parametrize(\n "op_name, action",\n [\n # ("count", "always_int"),\n ("sum", "large_int"),\n # ("std", "always_float"),\n ("var", "always_float"),\n # ("sem", "always_float"),\n ("mean", "always_float"),\n ("median", "always_float"),\n ("prod", "large_int"),\n ("min", 
"preserve"),\n ("max", "preserve"),\n ("first", "preserve"),\n ("last", "preserve"),\n ],\n)\n@pytest.mark.parametrize(\n "data",\n [\n pd.array([1, 2, 3, 4], dtype="Int64"),\n pd.array([1, 2, 3, 4], dtype="Int8"),\n pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"),\n pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"),\n pd.array([True, True, False, False], dtype="boolean"),\n ],\n)\ndef test_cython_agg_EA_known_dtypes(data, op_name, action, with_na):\n if with_na:\n data[3] = pd.NA\n\n df = DataFrame({"key": ["a", "a", "b", "b"], "col": data})\n grouped = df.groupby("key")\n\n if action == "always_int":\n # always Int64\n expected_dtype = pd.Int64Dtype()\n elif action == "large_int":\n # for any int/bool use Int64, for float preserve dtype\n if is_float_dtype(data.dtype):\n expected_dtype = data.dtype\n elif is_integer_dtype(data.dtype):\n # match the numpy dtype we'd get with the non-nullable analogue\n expected_dtype = data.dtype\n else:\n expected_dtype = pd.Int64Dtype()\n elif action == "always_float":\n # for any int/bool use Float64, for float preserve dtype\n if is_float_dtype(data.dtype):\n expected_dtype = data.dtype\n else:\n expected_dtype = pd.Float64Dtype()\n elif action == "preserve":\n expected_dtype = data.dtype\n\n result = getattr(grouped, op_name)()\n assert result["col"].dtype == expected_dtype\n\n result = grouped.aggregate(op_name)\n assert result["col"].dtype == expected_dtype\n\n result = getattr(grouped["col"], op_name)()\n assert result.dtype == expected_dtype\n\n result = grouped["col"].aggregate(op_name)\n assert result.dtype == expected_dtype\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\test_cython.py | test_cython.py | Python | 12,866 | 0.95 | 0.077803 | 0.071618 | node-utils | 732 | 2023-12-27T17:55:32.977382 | BSD-3-Clause | true | d79504b167f512a4ec8a6b9432e1e14e |
import numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_arm\nfrom pandas.errors import NumbaUtilError\n\nfrom pandas import (\n DataFrame,\n Index,\n NamedAgg,\n Series,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\npytestmark = [pytest.mark.single_cpu]\n\nnumba = pytest.importorskip("numba")\npytestmark.append(\n pytest.mark.skipif(\n Version(numba.__version__) == Version("0.61") and is_platform_arm(),\n reason=f"Segfaults on ARM platforms with numba {numba.__version__}",\n )\n)\n\n\ndef test_correct_function_signature():\n pytest.importorskip("numba")\n\n def incorrect_function(x):\n return sum(x) * 2.7\n\n data = DataFrame(\n {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},\n columns=["key", "data"],\n )\n with pytest.raises(NumbaUtilError, match="The first 2"):\n data.groupby("key").agg(incorrect_function, engine="numba")\n\n with pytest.raises(NumbaUtilError, match="The first 2"):\n data.groupby("key")["data"].agg(incorrect_function, engine="numba")\n\n\ndef test_check_nopython_kwargs():\n pytest.importorskip("numba")\n\n def incorrect_function(values, index):\n return sum(values) * 2.7\n\n data = DataFrame(\n {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},\n columns=["key", "data"],\n )\n with pytest.raises(NumbaUtilError, match="numba does not support"):\n data.groupby("key").agg(incorrect_function, engine="numba", a=1)\n\n with pytest.raises(NumbaUtilError, match="numba does not support"):\n data.groupby("key")["data"].agg(incorrect_function, engine="numba", a=1)\n\n\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\n@pytest.mark.parametrize("jit", [True, False])\n@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])\n@pytest.mark.parametrize("as_index", [True, False])\ndef test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):\n 
pytest.importorskip("numba")\n\n def func_numba(values, index):\n return np.mean(values) * 2.7\n\n if jit:\n # Test accepted jitted functions\n import numba\n\n func_numba = numba.jit(func_numba)\n\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n grouped = data.groupby(0, as_index=as_index)\n if pandas_obj == "Series":\n grouped = grouped[1]\n\n result = grouped.agg(func_numba, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\n@pytest.mark.parametrize("jit", [True, False])\n@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])\ndef test_cache(jit, pandas_obj, nogil, parallel, nopython):\n # Test that the functions are cached correctly if we switch functions\n pytest.importorskip("numba")\n\n def func_1(values, index):\n return np.mean(values) - 3.4\n\n def func_2(values, index):\n return np.mean(values) * 2.7\n\n if jit:\n import numba\n\n func_1 = numba.jit(func_1)\n func_2 = numba.jit(func_2)\n\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n grouped = data.groupby(0)\n if pandas_obj == "Series":\n grouped = grouped[1]\n\n result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")\n tm.assert_equal(result, expected)\n\n # Add func_2 to the cache\n result = grouped.agg(func_2, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")\n tm.assert_equal(result, expected)\n\n # Retest func_1 which should use the cache\n 
result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")\n tm.assert_equal(result, expected)\n\n\ndef test_use_global_config():\n pytest.importorskip("numba")\n\n def func_1(values, index):\n return np.mean(values) - 3.4\n\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n grouped = data.groupby(0)\n expected = grouped.agg(func_1, engine="numba")\n with option_context("compute.use_numba", True):\n result = grouped.agg(func_1, engine=None)\n tm.assert_frame_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n "agg_kwargs",\n [\n {"func": ["min", "max"]},\n {"func": "min"},\n {"func": {1: ["min", "max"], 2: "sum"}},\n {"bmin": NamedAgg(column=1, aggfunc="min")},\n ],\n)\ndef test_multifunc_numba_vs_cython_frame(agg_kwargs):\n pytest.importorskip("numba")\n data = DataFrame(\n {\n 0: ["a", "a", "b", "b", "a"],\n 1: [1.0, 2.0, 3.0, 4.0, 5.0],\n 2: [1, 2, 3, 4, 5],\n },\n columns=[0, 1, 2],\n )\n grouped = data.groupby(0)\n result = grouped.agg(**agg_kwargs, engine="numba")\n expected = grouped.agg(**agg_kwargs, engine="cython")\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "agg_kwargs,expected_func",\n [\n ({"func": lambda values, index: values.sum()}, "sum"),\n # FIXME\n pytest.param(\n {\n "func": [\n lambda values, index: values.sum(),\n lambda values, index: values.min(),\n ]\n },\n ["sum", "min"],\n marks=pytest.mark.xfail(\n reason="This doesn't work yet! 
Fails in nopython pipeline!"\n ),\n ),\n ],\n)\ndef test_multifunc_numba_udf_frame(agg_kwargs, expected_func):\n pytest.importorskip("numba")\n data = DataFrame(\n {\n 0: ["a", "a", "b", "b", "a"],\n 1: [1.0, 2.0, 3.0, 4.0, 5.0],\n 2: [1, 2, 3, 4, 5],\n },\n columns=[0, 1, 2],\n )\n grouped = data.groupby(0)\n result = grouped.agg(**agg_kwargs, engine="numba")\n expected = grouped.agg(expected_func, engine="cython")\n # check_dtype can be removed if GH 44952 is addressed\n # Currently, UDFs still always return float64 while reductions can preserve dtype\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n\n@pytest.mark.parametrize(\n "agg_kwargs",\n [{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],\n)\ndef test_multifunc_numba_vs_cython_series(agg_kwargs):\n pytest.importorskip("numba")\n labels = ["a", "a", "b", "b", "a"]\n data = Series([1.0, 2.0, 3.0, 4.0, 5.0])\n grouped = data.groupby(labels)\n agg_kwargs["engine"] = "numba"\n result = grouped.agg(**agg_kwargs)\n agg_kwargs["engine"] = "cython"\n expected = grouped.agg(**agg_kwargs)\n if isinstance(expected, DataFrame):\n tm.assert_frame_equal(result, expected)\n else:\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.single_cpu\n@pytest.mark.parametrize(\n "data,agg_kwargs",\n [\n (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": ["min", "max"]}),\n (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": "min"}),\n (\n DataFrame(\n {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]\n ),\n {"func": ["min", "max"]},\n ),\n (\n DataFrame(\n {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]\n ),\n {"func": "min"},\n ),\n (\n DataFrame(\n {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]\n ),\n {"func": {1: ["min", "max"], 2: "sum"}},\n ),\n (\n DataFrame(\n {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]\n ),\n {"min_col": NamedAgg(column=1, aggfunc="min")},\n ),\n ],\n)\ndef 
test_multifunc_numba_kwarg_propagation(data, agg_kwargs):\n pytest.importorskip("numba")\n labels = ["a", "a", "b", "b", "a"]\n grouped = data.groupby(labels)\n result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})\n expected = grouped.agg(**agg_kwargs, engine="numba")\n if isinstance(expected, DataFrame):\n tm.assert_frame_equal(result, expected)\n else:\n tm.assert_series_equal(result, expected)\n\n\ndef test_args_not_cached():\n # GH 41647\n pytest.importorskip("numba")\n\n def sum_last(values, index, n):\n return values[-n:].sum()\n\n df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})\n grouped_x = df.groupby("id")["x"]\n result = grouped_x.agg(sum_last, 1, engine="numba")\n expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id"))\n tm.assert_series_equal(result, expected)\n\n result = grouped_x.agg(sum_last, 2, engine="numba")\n expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_index_data_correctly_passed():\n # GH 43133\n pytest.importorskip("numba")\n\n def f(values, index):\n return np.mean(index)\n\n df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3])\n result = df.groupby("group").aggregate(f, engine="numba")\n expected = DataFrame(\n [-1.5, -3.0], columns=["v"], index=Index(["A", "B"], name="group")\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_engine_kwargs_not_cached():\n # If the user passes a different set of engine_kwargs don't return the same\n # jitted function\n pytest.importorskip("numba")\n nogil = True\n parallel = False\n nopython = True\n\n def func_kwargs(values, index):\n return nogil + parallel + nopython\n\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n df = DataFrame({"value": [0, 0, 0]})\n result = df.groupby(level=0).aggregate(\n func_kwargs, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame({"value": [2.0, 2.0, 
2.0]})\n tm.assert_frame_equal(result, expected)\n\n nogil = False\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n result = df.groupby(level=0).aggregate(\n func_kwargs, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame({"value": [1.0, 1.0, 1.0]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore")\ndef test_multiindex_one_key(nogil, parallel, nopython):\n pytest.importorskip("numba")\n\n def numba_func(values, index):\n return 1\n\n df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n result = df.groupby("A").agg(\n numba_func, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame([1.0], index=Index([1], name="A"), columns=["C"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiindex_multi_key_not_supported(nogil, parallel, nopython):\n pytest.importorskip("numba")\n\n def numba_func(values, index):\n return 1\n\n df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n with pytest.raises(NotImplementedError, match="more than 1 grouping labels"):\n df.groupby(["A", "B"]).agg(\n numba_func, engine="numba", engine_kwargs=engine_kwargs\n )\n\n\ndef test_multilabel_numba_vs_cython(numba_supported_reductions):\n pytest.importorskip("numba")\n reduction, kwargs = numba_supported_reductions\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n }\n )\n gb = df.groupby(["A", "B"])\n res_agg = gb.agg(reduction, engine="numba", **kwargs)\n expected_agg = gb.agg(reduction, engine="cython", **kwargs)\n tm.assert_frame_equal(res_agg, expected_agg)\n # Test that calling the 
aggregation directly also works\n direct_res = getattr(gb, reduction)(engine="numba", **kwargs)\n direct_expected = getattr(gb, reduction)(engine="cython", **kwargs)\n tm.assert_frame_equal(direct_res, direct_expected)\n\n\ndef test_multilabel_udf_numba_vs_cython():\n pytest.importorskip("numba")\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n }\n )\n gb = df.groupby(["A", "B"])\n result = gb.agg(lambda values, index: values.min(), engine="numba")\n expected = gb.agg(lambda x: x.min(), engine="cython")\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\test_numba.py | test_numba.py | Python | 13,366 | 0.95 | 0.099502 | 0.042169 | python-kit | 794 | 2023-09-27T09:44:55.713944 | Apache-2.0 | true | 37b7b43ee14a7a61a14e7dd8be3c5a5c |
"""\ntest all other .agg behavior\n"""\n\nimport datetime as dt\nfrom functools import partial\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import SpecificationError\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n PeriodIndex,\n Series,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\ndef test_agg_partial_failure_raises():\n # GH#43741\n\n df = DataFrame(\n {\n "data1": np.random.default_rng(2).standard_normal(5),\n "data2": np.random.default_rng(2).standard_normal(5),\n "key1": ["a", "a", "b", "b", "a"],\n "key2": ["one", "two", "one", "two", "one"],\n }\n )\n grouped = df.groupby("key1")\n\n def peak_to_peak(arr):\n return arr.max() - arr.min()\n\n with pytest.raises(TypeError, match="unsupported operand type"):\n grouped.agg([peak_to_peak])\n\n with pytest.raises(TypeError, match="unsupported operand type"):\n grouped.agg(peak_to_peak)\n\n\ndef test_agg_datetimes_mixed():\n data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]\n\n df1 = DataFrame(\n {\n "key": [x[0] for x in data],\n "date": [x[1] for x in data],\n "value": [x[2] for x in data],\n }\n )\n\n data = [\n [\n row[0],\n (dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),\n row[2],\n ]\n for row in data\n ]\n\n df2 = DataFrame(\n {\n "key": [x[0] for x in data],\n "date": [x[1] for x in data],\n "value": [x[2] for x in data],\n }\n )\n\n df1["weights"] = df1["value"] / df1["value"].sum()\n gb1 = df1.groupby("date").aggregate("sum")\n\n df2["weights"] = df1["value"] / df1["value"].sum()\n gb2 = df2.groupby("date").aggregate("sum")\n\n assert len(gb1) == len(gb2)\n\n\ndef test_agg_period_index():\n prng = period_range("2012-1-1", freq="M", periods=3)\n df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)), index=prng)\n rs = df.groupby(level=0).sum()\n assert isinstance(rs.index, PeriodIndex)\n\n # GH 3579\n index = 
period_range(start="1999-01", periods=5, freq="M")\n s1 = Series(np.random.default_rng(2).random(len(index)), index=index)\n s2 = Series(np.random.default_rng(2).random(len(index)), index=index)\n df = DataFrame.from_dict({"s1": s1, "s2": s2})\n grouped = df.groupby(df.index.month)\n list(grouped)\n\n\ndef test_agg_dict_parameter_cast_result_dtypes():\n # GH 12821\n\n df = DataFrame(\n {\n "class": ["A", "A", "B", "B", "C", "C", "D", "D"],\n "time": date_range("1/1/2011", periods=8, freq="h"),\n }\n )\n df.loc[[0, 1, 2, 5], "time"] = None\n\n # test for `first` function\n exp = df.loc[[0, 3, 4, 6]].set_index("class")\n grouped = df.groupby("class")\n tm.assert_frame_equal(grouped.first(), exp)\n tm.assert_frame_equal(grouped.agg("first"), exp)\n tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)\n tm.assert_series_equal(grouped.time.first(), exp["time"])\n tm.assert_series_equal(grouped.time.agg("first"), exp["time"])\n\n # test for `last` function\n exp = df.loc[[0, 3, 4, 7]].set_index("class")\n grouped = df.groupby("class")\n tm.assert_frame_equal(grouped.last(), exp)\n tm.assert_frame_equal(grouped.agg("last"), exp)\n tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)\n tm.assert_series_equal(grouped.time.last(), exp["time"])\n tm.assert_series_equal(grouped.time.agg("last"), exp["time"])\n\n # count\n exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")\n tm.assert_series_equal(grouped.time.agg(len), exp)\n tm.assert_series_equal(grouped.time.size(), exp)\n\n exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")\n tm.assert_series_equal(grouped.time.count(), exp)\n\n\ndef test_agg_cast_results_dtypes():\n # similar to GH12821\n # xref #11444\n u = [dt.datetime(2015, x + 1, 1) for x in range(12)]\n v = list("aaabbbbbbccd")\n df = DataFrame({"X": v, "Y": u})\n\n result = df.groupby("X")["Y"].agg(len)\n expected = df.groupby("X")["Y"].count()\n tm.assert_series_equal(result, expected)\n\n\ndef 
test_aggregate_float64_no_int64():\n # see gh-11199\n df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})\n\n expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])\n expected.index.name = "b"\n\n result = df.groupby("b")[["a"]].mean()\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])\n expected.index.name = "b"\n\n result = df.groupby("b")[["a", "c"]].mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_api_consistency():\n # GH 9052\n # make sure that the aggregates via dict\n # are consistent\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "two", "two", "two", "one", "two"],\n "C": np.random.default_rng(2).standard_normal(8) + 1.0,\n "D": np.arange(8),\n }\n )\n\n grouped = df.groupby(["A", "B"])\n c_mean = grouped["C"].mean()\n c_sum = grouped["C"].sum()\n d_mean = grouped["D"].mean()\n d_sum = grouped["D"].sum()\n\n result = grouped["D"].agg(["sum", "mean"])\n expected = pd.concat([d_sum, d_mean], axis=1)\n expected.columns = ["sum", "mean"]\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg(["sum", "mean"])\n expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)\n expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped[["D", "C"]].agg(["sum", "mean"])\n expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)\n expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg({"C": "mean", "D": "sum"})\n expected = pd.concat([d_sum, c_mean], axis=1)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})\n expected = pd.concat([c_mean, c_sum, d_mean, 
d_sum], axis=1)\n expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])\n\n msg = r"Column\(s\) \['r', 'r2'\] do not exist"\n with pytest.raises(KeyError, match=msg):\n grouped[["D", "C"]].agg({"r": "sum", "r2": "mean"})\n\n\ndef test_agg_dict_renaming_deprecation():\n # 15931\n df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})\n\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n df.groupby("A").agg(\n {"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}\n )\n\n msg = r"Column\(s\) \['ma'\] do not exist"\n with pytest.raises(KeyError, match=msg):\n df.groupby("A")[["B", "C"]].agg({"ma": "max"})\n\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n df.groupby("A").B.agg({"foo": "count"})\n\n\ndef test_agg_compat():\n # GH 12334\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "two", "two", "two", "one", "two"],\n "C": np.random.default_rng(2).standard_normal(8) + 1.0,\n "D": np.arange(8),\n }\n )\n\n g = df.groupby(["A", "B"])\n\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n g["D"].agg({"C": ["sum", "std"]})\n\n with pytest.raises(SpecificationError, match=msg):\n g["D"].agg({"C": "sum", "D": "std"})\n\n\ndef test_agg_nested_dicts():\n # API change for disallowing these types of nested dicts\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "two", "two", "two", "one", "two"],\n "C": np.random.default_rng(2).standard_normal(8) + 1.0,\n "D": np.arange(8),\n }\n )\n\n g = df.groupby(["A", "B"])\n\n msg = r"nested renamer is not supported"\n with pytest.raises(SpecificationError, match=msg):\n g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})\n\n with pytest.raises(SpecificationError, match=msg):\n g.agg({"C": {"ra": ["mean", 
"std"]}, "D": {"rb": ["mean", "std"]}})\n\n # same name as the original column\n # GH9052\n with pytest.raises(SpecificationError, match=msg):\n g["D"].agg({"result1": np.sum, "result2": np.mean})\n\n with pytest.raises(SpecificationError, match=msg):\n g["D"].agg({"D": np.sum, "result2": np.mean})\n\n\ndef test_agg_item_by_item_raise_typeerror():\n df = DataFrame(np.random.default_rng(2).integers(10, size=(20, 10)))\n\n def raiseException(df):\n pprint_thing("----------------------------------------")\n pprint_thing(df.to_string())\n raise TypeError("test")\n\n with pytest.raises(TypeError, match="test"):\n df.groupby(0).agg(raiseException)\n\n\ndef test_series_agg_multikey():\n ts = Series(\n np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)\n )\n grouped = ts.groupby([lambda x: x.year, lambda x: x.month])\n\n result = grouped.agg("sum")\n expected = grouped.sum()\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_agg_multi_pure_python():\n data = DataFrame(\n {\n "A": [\n "foo",\n "foo",\n "foo",\n "foo",\n "bar",\n "bar",\n "bar",\n "bar",\n "foo",\n "foo",\n "foo",\n ],\n "B": [\n "one",\n "one",\n "one",\n "two",\n "one",\n "one",\n "one",\n "two",\n "two",\n "two",\n "one",\n ],\n "C": [\n "dull",\n "dull",\n "shiny",\n "dull",\n "dull",\n "shiny",\n "shiny",\n "dull",\n "shiny",\n "shiny",\n "shiny",\n ],\n "D": np.random.default_rng(2).standard_normal(11),\n "E": np.random.default_rng(2).standard_normal(11),\n "F": np.random.default_rng(2).standard_normal(11),\n }\n )\n\n def bad(x):\n if isinstance(x.values, np.ndarray):\n assert len(x.values.base) > 0\n return "foo"\n\n result = data.groupby(["A", "B"]).agg(bad)\n expected = data.groupby(["A", "B"]).agg(lambda x: "foo")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_consistency():\n # agg with ([]) and () not consistent\n # GH 6715\n def P1(a):\n return np.percentile(a.dropna(), q=1)\n\n df = DataFrame(\n {\n "col1": [1, 2, 3, 4],\n "col2": [10, 25, 26, 
31],\n "date": [\n dt.date(2013, 2, 10),\n dt.date(2013, 2, 10),\n dt.date(2013, 2, 11),\n dt.date(2013, 2, 11),\n ],\n }\n )\n\n g = df.groupby("date")\n\n expected = g.agg([P1])\n expected.columns = expected.columns.levels[0]\n\n result = g.agg(P1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_callables():\n # GH 7929\n df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)\n\n class fn_class:\n def __call__(self, x):\n return sum(x)\n\n equiv_callables = [\n sum,\n np.sum,\n lambda x: sum(x),\n lambda x: x.sum(),\n partial(sum),\n fn_class(),\n ]\n\n expected = df.groupby("foo").agg("sum")\n for ecall in equiv_callables:\n warn = FutureWarning if ecall is sum or ecall is np.sum else None\n msg = "using DataFrameGroupBy.sum"\n with tm.assert_produces_warning(warn, match=msg):\n result = df.groupby("foo").agg(ecall)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_over_numpy_arrays():\n # GH 3788\n df = DataFrame(\n [\n [1, np.array([10, 20, 30])],\n [1, np.array([40, 50, 60])],\n [2, np.array([20, 30, 40])],\n ],\n columns=["category", "arraydata"],\n )\n gb = df.groupby("category")\n\n expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]\n expected_index = Index([1, 2], name="category")\n expected_column = ["arraydata"]\n expected = DataFrame(expected_data, index=expected_index, columns=expected_column)\n\n alt = gb.sum(numeric_only=False)\n tm.assert_frame_equal(alt, expected)\n\n result = gb.agg("sum", numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n # FIXME: the original version of this test called `gb.agg(sum)`\n # and that raises TypeError if `numeric_only=False` is passed\n\n\n@pytest.mark.parametrize("as_period", [True, False])\ndef test_agg_tzaware_non_datetime_result(as_period):\n # discussed in GH#29589, fixed in GH#29641, operating on tzaware values\n # with function that is not dtype-preserving\n dti = date_range("2012-01-01", periods=4, tz="UTC")\n if as_period:\n dti = 
dti.tz_localize(None).to_period("D")\n\n df = DataFrame({"a": [0, 0, 1, 1], "b": dti})\n gb = df.groupby("a")\n\n # Case that _does_ preserve the dtype\n result = gb["b"].agg(lambda x: x.iloc[0])\n expected = Series(dti[::2], name="b")\n expected.index.name = "a"\n tm.assert_series_equal(result, expected)\n\n # Cases that do _not_ preserve the dtype\n result = gb["b"].agg(lambda x: x.iloc[0].year)\n expected = Series([2012, 2012], name="b")\n expected.index.name = "a"\n tm.assert_series_equal(result, expected)\n\n result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])\n expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")\n expected.index.name = "a"\n if as_period:\n expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b")\n expected.index.name = "a"\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_timezone_round_trip():\n # GH 15426\n ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")\n df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})\n\n result1 = df.groupby("a")["b"].agg("min").iloc[0]\n result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]\n result3 = df.groupby("a")["b"].min().iloc[0]\n\n assert result1 == ts\n assert result2 == ts\n assert result3 == ts\n\n dates = [\n pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)\n ]\n df = DataFrame({"A": ["a", "b"] * 2, "B": dates})\n grouped = df.groupby("A")\n\n ts = df["B"].iloc[0]\n assert ts == grouped.nth(0)["B"].iloc[0]\n assert ts == grouped.head(1)["B"].iloc[0]\n assert ts == grouped.first()["B"].iloc[0]\n\n # GH#27110 applying iloc should return a DataFrame\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]\n\n ts = df["B"].iloc[2]\n assert ts == grouped.last()["B"].iloc[0]\n\n # GH#27110 applying iloc should return a DataFrame\n msg = 
def test_sum_uint64_overflow():
    # see gh-14758
    # Sums past the int64 maximum must be kept exact (object/uint64 path)
    # rather than silently overflowing.
    big = 9223372036854775807  # iinfo(int64).max
    df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object) + big

    idx = Index([big + 1, big + 3, big + 5], dtype=np.uint64)
    expected = DataFrame({1: [big + 2, big + 4, big + 6]}, index=idx, dtype=object)
    expected.index.name = 0

    tm.assert_frame_equal(df.groupby(0).sum(numeric_only=False), expected)

    # the only value column is non-numeric, so numeric_only=True drops it
    tm.assert_frame_equal(df.groupby(0).sum(numeric_only=True), expected[[]])
def test_agg_list_like_func():
    # GH 18473: an aggregation that returns a list per group must not be
    # mistaken for a list of aggregations.
    labels = [str(i) for i in range(3)]
    df = DataFrame({"A": labels, "B": labels})
    gb = df.groupby("A", as_index=False, sort=False)

    result = gb.agg({"B": lambda col: list(col)})

    # every group is a singleton, so each cell becomes a one-element list
    expected = DataFrame({"A": labels, "B": [[lab] for lab in labels]})
    tm.assert_frame_equal(result, expected)
go down ndarray paths\n from pandas.tests.extension.decimal.array import (\n DecimalArray,\n make_data,\n to_decimal,\n )\n\n data = make_data()[:5]\n df = DataFrame(\n {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}\n )\n\n expected = Series(to_decimal([data[0], data[3]]))\n\n def weird_func(x):\n # weird function that raise something other than TypeError or IndexError\n # in _python_agg_general\n if len(x) == 0:\n raise err_cls\n return x.iloc[0]\n\n result = df["decimals"].groupby(df["id1"]).agg(weird_func)\n tm.assert_series_equal(result, expected, check_names=False)\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\test_other.py | test_other.py | Python | 20,708 | 0.95 | 0.102071 | 0.075229 | node-utils | 692 | 2023-12-18T08:58:51.430813 | GPL-3.0 | true | cf908e660bce3aa55210e218cc6913f1 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\__pycache__\test_aggregate.cpython-313.pyc | test_aggregate.cpython-313.pyc | Other | 89,794 | 0.75 | 0.00098 | 0.017635 | vue-tools | 365 | 2024-09-06T21:15:04.211664 | GPL-3.0 | true | 5bdddec17716ae9dbefe33575e7827d5 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\__pycache__\test_cython.cpython-313.pyc | test_cython.cpython-313.pyc | Other | 20,691 | 0.8 | 0 | 0.007168 | awesome-app | 597 | 2025-01-20T04:15:14.427208 | GPL-3.0 | true | c403e8e35cdea0bf3097f968a8c5fdc6 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\__pycache__\test_numba.cpython-313.pyc | test_numba.cpython-313.pyc | Other | 19,684 | 0.95 | 0 | 0.030435 | vue-tools | 35 | 2023-08-09T21:46:34.693828 | BSD-3-Clause | true | aaf0f3a7c557e8276bf88086d538103a |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\__pycache__\test_other.cpython-313.pyc | test_other.cpython-313.pyc | Other | 35,942 | 0.8 | 0.002165 | 0.014151 | awesome-app | 930 | 2024-05-16T12:36:57.404946 | BSD-3-Clause | true | f096177728d989e304b079ac360df94e |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\aggregate\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 205 | 0.7 | 0 | 0 | vue-tools | 912 | 2024-06-05T12:30:32.098429 | Apache-2.0 | true | eb94ba3bd35cdb54a8a9811e9ea65587 |
import numpy as np\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n)\nimport pandas._testing as tm\n\n\ndef test_corrwith_with_1_axis():\n # GH 47723\n df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]})\n gb = df.groupby("a")\n\n msg = "DataFrameGroupBy.corrwith with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = gb.corrwith(df, axis=1)\n index = Index(\n data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)],\n name=("a", None),\n )\n expected = Series([np.nan] * 6, index=index)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_corrwith.py | test_corrwith.py | Python | 615 | 0.95 | 0.041667 | 0.05 | react-lib | 705 | 2024-12-06T22:03:35.527125 | Apache-2.0 | true | 7789020cb38c1fe9b5cde95bba4ec62f |
def test_series_describe_multikey():
    """describe() columns agree with the corresponding groupby reductions."""
    ser = Series(
        np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
    )
    gb = ser.groupby([lambda ts: ts.year, lambda ts: ts.month])
    described = gb.describe()
    # each describe column must match the equivalent direct reduction
    for stat in ("mean", "std", "min"):
        tm.assert_series_equal(described[stat], getattr(gb, stat)(), check_names=False)
using_infer_string):\n grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])\n result = grouped.describe()\n desc_groups = []\n for col in tsframe:\n group = grouped[col].describe()\n # GH 17464 - Remove duplicate MultiIndex levels\n group_col = MultiIndex(\n levels=[Index([col], dtype=tsframe.columns.dtype), group.columns],\n codes=[[0] * len(group.columns), range(len(group.columns))],\n )\n group = DataFrame(group.values, columns=group_col, index=group.index)\n desc_groups.append(group)\n expected = pd.concat(desc_groups, axis=1)\n tm.assert_frame_equal(result, expected)\n\n # remainder of the tests fails with string dtype but is testing deprecated behaviour\n if using_infer_string:\n return\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)\n result = groupedT.describe()\n expected = tsframe.describe().T\n # reverting the change from https://github.com/pandas-dev/pandas/pull/35441/\n expected.index = MultiIndex(\n levels=[[0, 1], expected.index],\n codes=[[0, 0, 1, 1], range(len(expected.index))],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_frame_describe_tupleindex():\n # GH 14848 - regression from 0.19.0 to 0.19.1\n df1 = DataFrame(\n {\n "x": [1, 2, 3, 4, 5] * 3,\n "y": [10, 20, 30, 40, 50] * 3,\n "z": [100, 200, 300, 400, 500] * 3,\n }\n )\n df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5\n df2 = df1.rename(columns={"k": "key"})\n msg = "Names should be list-like for a MultiIndex"\n with pytest.raises(ValueError, match=msg):\n df1.groupby("k").describe()\n with pytest.raises(ValueError, match=msg):\n df2.groupby("key").describe()\n\n\ndef test_frame_describe_unstacked_format():\n # GH 4792\n prices = {\n Timestamp("2011-01-06 10:59:05", tz=None): 24990,\n Timestamp("2011-01-06 12:43:33", tz=None): 25499,\n Timestamp("2011-01-06 12:54:09", tz=None): 25499,\n }\n volumes = {\n Timestamp("2011-01-06 
10:59:05", tz=None): 1500000000,\n Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,\n Timestamp("2011-01-06 12:54:09", tz=None): 100000000,\n }\n df = DataFrame({"PRICE": prices, "VOLUME": volumes})\n result = df.groupby("PRICE").VOLUME.describe()\n data = [\n df[df.PRICE == 24990].VOLUME.describe().values.tolist(),\n df[df.PRICE == 25499].VOLUME.describe().values.tolist(),\n ]\n expected = DataFrame(\n data,\n index=Index([24990, 25499], name="PRICE"),\n columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.filterwarnings(\n "ignore:"\n "indexing past lexsort depth may impact performance:"\n "pandas.errors.PerformanceWarning"\n)\n@pytest.mark.parametrize("as_index", [True, False])\n@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])\ndef test_describe_with_duplicate_output_column_names(as_index, keys):\n # GH 35314\n df = DataFrame(\n {\n "a1": [99, 99, 99, 88, 88, 88],\n "a2": [99, 99, 99, 88, 88, 88],\n "b": [1, 2, 3, 4, 5, 6],\n "c": [10, 20, 30, 40, 50, 60],\n },\n columns=["a1", "a2", "b", "b"],\n copy=False,\n )\n if keys == ["a1"]:\n df = df.drop(columns="a2")\n\n expected = (\n DataFrame.from_records(\n [\n ("b", "count", 3.0, 3.0),\n ("b", "mean", 5.0, 2.0),\n ("b", "std", 1.0, 1.0),\n ("b", "min", 4.0, 1.0),\n ("b", "25%", 4.5, 1.5),\n ("b", "50%", 5.0, 2.0),\n ("b", "75%", 5.5, 2.5),\n ("b", "max", 6.0, 3.0),\n ("b", "count", 3.0, 3.0),\n ("b", "mean", 5.0, 2.0),\n ("b", "std", 1.0, 1.0),\n ("b", "min", 4.0, 1.0),\n ("b", "25%", 4.5, 1.5),\n ("b", "50%", 5.0, 2.0),\n ("b", "75%", 5.5, 2.5),\n ("b", "max", 6.0, 3.0),\n ],\n )\n .set_index([0, 1])\n .T\n )\n expected.columns.names = [None, None]\n if len(keys) == 2:\n expected.index = MultiIndex(\n levels=[[88, 99], [88, 99]], codes=[[0, 1], [0, 1]], names=["a1", "a2"]\n )\n else:\n expected.index = Index([88, 99], name="a1")\n\n if not as_index:\n expected = expected.reset_index()\n\n result = df.groupby(keys, 
def test_describe_duplicate_columns():
    # GH#50806: describe() must cope with duplicate column labels,
    # producing one stats sub-frame per positional column.
    df = DataFrame([[0, 1, 2, 3]])
    df.columns = [0, 1, 2, 0]
    result = df.groupby(df[1]).describe(percentiles=[])

    stats = ["count", "mean", "std", "min", "50%", "max"]
    pieces = []
    for val in (0.0, 2.0, 3.0):
        pieces.append(
            DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=stats)
        )
    expected = pd.concat(pieces, axis=1)
    # outer level repeats the (duplicated) source labels: 0, 2, 0
    expected.columns = MultiIndex(
        levels=[[0, 2], stats],
        codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))],
    )
    expected.index.names = [1]
    tm.assert_frame_equal(result, expected)
None, "exclude": ["int"]},\n {"percentiles": [0.10, 0.20, 0.30], "include": ["int"], "exclude": None},\n ],\n)\ndef test_groupby_empty_dataset(dtype, kwargs):\n # GH#41575\n df = DataFrame([[1, 2, 3]], columns=["A", "B", "C"], dtype=dtype)\n df["B"] = df["B"].astype(int)\n df["C"] = df["C"].astype(float)\n\n result = df.iloc[:0].groupby("A").describe(**kwargs)\n expected = df.groupby("A").describe(**kwargs).reset_index(drop=True).iloc[:0]\n tm.assert_frame_equal(result, expected)\n\n result = df.iloc[:0].groupby("A").B.describe(**kwargs)\n expected = df.groupby("A").B.describe(**kwargs).reset_index(drop=True).iloc[:0]\n expected.index = Index([], dtype=df.columns.dtype)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_describe.py | test_describe.py | Python | 9,884 | 0.95 | 0.083056 | 0.045455 | python-kit | 53 | 2024-08-16T23:00:20.889671 | BSD-3-Clause | true | d25d13921452b692350d988ecd83d75f |
def test_group_shift_with_fill_value():
    # GH #24128: shift(-1, fill_value=0) fills each group's trailing row
    # with 0 instead of NaN.
    nrows = 24
    rows = [(i % 12, i % 3, i) for i in range(nrows)]
    df = DataFrame(rows, dtype=float, columns=["A", "B", "Z"], index=None)

    shifted = df.groupby(["A", "B"]).shift(-1, fill_value=0)

    # each (A, B) pair repeats 12 rows apart, so the shifted value is i+12,
    # except for the final occurrence of each group which takes the fill
    filled = [(i + 12 if i < nrows - 12 else 0) for i in range(nrows)]
    expected = DataFrame(filled, dtype=float, columns=["Z"], index=None)
    tm.assert_frame_equal(shifted, expected)
def test_group_diff_bool():
    """diff() on a boolean column: NaN for each group's first row, bool after."""
    frame = DataFrame({"a": [1, 2, 3, 3, 2], "b": [True, True, False, False, True]})
    computed = frame.groupby("a")["b"].diff()
    tm.assert_series_equal(
        computed, Series([np.nan, np.nan, np.nan, False, False], name="b")
    )
def test_shift_periods_freq():
    # GH 54093: a negative `periods` combined with `freq` shifts the index
    # backwards while leaving the values untouched.
    data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}
    frame = DataFrame(data, index=date_range(start="20100101", periods=6))
    shifted = frame.groupby(frame.index).shift(periods=-2, freq="D")
    expected = DataFrame(data, index=date_range(start="2009-12-30", periods=6))
    tm.assert_frame_equal(shifted, expected)
DataFrame(\n {"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]},\n index=date_range("1/1/2000", periods=5, freq="h"),\n )\n shifted_df = df.groupby("b")[["a"]].shift(\n [0, 1],\n freq="h",\n )\n expected_df = DataFrame(\n {\n "a_0": [1.0, 2.0, 3.0, 4.0, 5.0, np.nan],\n "a_1": [\n np.nan,\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n ],\n },\n index=date_range("1/1/2000", periods=6, freq="h"),\n )\n tm.assert_frame_equal(shifted_df, expected_df)\n\n\ndef test_group_shift_with_multiple_periods_and_fill_value():\n # GH#44424\n df = DataFrame(\n {"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]},\n )\n shifted_df = df.groupby("b")[["a"]].shift([0, 1], fill_value=-1)\n expected_df = DataFrame(\n {"a_0": [1, 2, 3, 4, 5], "a_1": [-1, 1, -1, 3, 2]},\n )\n tm.assert_frame_equal(shifted_df, expected_df)\n\n\ndef test_group_shift_with_multiple_periods_and_both_fill_and_freq_deprecated():\n # GH#44424\n df = DataFrame(\n {"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]},\n index=date_range("1/1/2000", periods=5, freq="h"),\n )\n msg = (\n "Passing a 'freq' together with a 'fill_value' silently ignores the "\n "fill_value"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.groupby("b")[["a"]].shift([1, 2], fill_value=1, freq="h")\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_groupby_shift_diff.py | test_groupby_shift_diff.py | Python | 7,925 | 0.95 | 0.109804 | 0.079812 | react-lib | 708 | 2024-08-24T12:42:34.212869 | GPL-3.0 | true | 3798c0f9d580675700c47ac160156c1b |
@pytest.mark.parametrize(
    "in_vals, out_vals",
    [
        # Basics: strictly increasing (T), strictly decreasing (F),
        # abs val increasing (F), non-strictly increasing (T)
        ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
        # Test with inf vals
        (
            [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
            [True, False, True, False],
        ),
        # Test with nan vals; should always be False
        (
            [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
            [False, False, False, False],
        ),
    ],
)
def test_is_monotonic_increasing(in_vals, out_vals):
    # GH 17015
    frame = DataFrame(
        {
            "A": [str(n) for n in range(1, 12)],
            "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
            "C": in_vals,
        }
    )
    result = frame.groupby("B").C.is_monotonic_increasing
    expected = Series(out_vals, index=Index(list("abcd"), name="B"), name="C")
    tm.assert_series_equal(result, expected)

    # cross-check against applying is_monotonic_increasing per group
    via_apply = frame.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
    tm.assert_series_equal(result, via_apply)
["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],\n "C": in_vals,\n }\n\n df = DataFrame(source_dict)\n result = df.groupby("B").C.is_monotonic_decreasing\n index = Index(list("abcd"), name="B")\n expected = Series(index=index, data=out_vals, name="C")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_is_monotonic.py | test_is_monotonic.py | Python | 2,566 | 0.95 | 0.025641 | 0.15493 | python-kit | 465 | 2024-11-02T13:12:19.545664 | BSD-3-Clause | true | 6709c8336374a4fd14e0ae4ed30faa15 |
def test_nlargest():
    values = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    keys = Series(list("a" * 5 + "b" * 5))

    result = values.groupby(keys).nlargest(3)
    expected = Series(
        [7, 5, 3, 10, 9, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
    )
    tm.assert_series_equal(result, expected)

    # with duplicate values, keep="last" reports the later occurrence
    values = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    expected = Series(
        [3, 2, 1, 3, 3, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
    )
    tm.assert_series_equal(values.groupby(keys).nlargest(3, keep="last"), expected)
@pytest.mark.parametrize(
    "data, groups",
    [([0, 1, 2, 3], [0, 0, 1, 1]), ([0], [0])],
)
@pytest.mark.parametrize("dtype", [None, *tm.ALL_INT_NUMPY_DTYPES])
@pytest.mark.parametrize("method", ["nlargest", "nsmallest"])
def test_nlargest_and_smallest_noop(data, groups, dtype, method):
    # GH 15272, GH 16345, GH 29129
    # When the input is already ordered and every group has at most n rows,
    # nlargest/nsmallest should return the input unchanged (a no-op).
    if dtype is not None:
        data = np.array(data, dtype=dtype)
    if method == "nlargest":
        # nlargest reports descending order, so feed it descending data
        data = list(reversed(data))
    ser = Series(data, name="a")

    result = getattr(ser.groupby(groups), method)(n=2)

    if isinstance(groups, list):
        outer = np.array(groups, dtype=int)
    else:
        outer = groups
    expected = Series(
        data, index=MultiIndex.from_arrays([outer, ser.index]), name="a"
    )
    tm.assert_series_equal(result, expected)
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n isna,\n)\nimport pandas._testing as tm\n\n\ndef test_first_last_nth(df):\n # tests for first / last / nth\n grouped = df.groupby("A")\n first = grouped.first()\n expected = df.loc[[1, 0], ["B", "C", "D"]]\n expected.index = Index(["bar", "foo"], name="A")\n expected = expected.sort_index()\n tm.assert_frame_equal(first, expected)\n\n nth = grouped.nth(0)\n expected = df.loc[[0, 1]]\n tm.assert_frame_equal(nth, expected)\n\n last = grouped.last()\n expected = df.loc[[5, 7], ["B", "C", "D"]]\n expected.index = Index(["bar", "foo"], name="A")\n tm.assert_frame_equal(last, expected)\n\n nth = grouped.nth(-1)\n expected = df.iloc[[5, 7]]\n tm.assert_frame_equal(nth, expected)\n\n nth = grouped.nth(1)\n expected = df.iloc[[2, 3]]\n tm.assert_frame_equal(nth, expected)\n\n # it works!\n grouped["B"].first()\n grouped["B"].last()\n grouped["B"].nth(0)\n\n df = df.copy()\n df.loc[df["A"] == "foo", "B"] = np.nan\n grouped = df.groupby("A")\n assert isna(grouped["B"].first()["foo"])\n assert isna(grouped["B"].last()["foo"])\n assert isna(grouped["B"].nth(0).iloc[0])\n\n # v0.14.0 whatsnew\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])\n g = df.groupby("A")\n result = g.first()\n expected = df.iloc[[1, 2]].set_index("A")\n tm.assert_frame_equal(result, expected)\n\n expected = df.iloc[[1, 2]]\n result = g.nth(0, dropna="any")\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["first", "last"])\ndef test_first_last_with_na_object(method, nulls_fixture):\n # https://github.com/pandas-dev/pandas/issues/32123\n groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a")\n result = getattr(groups, method)()\n\n if method == "first":\n values = [1, 3]\n else:\n values = [2, 3]\n\n values = np.array(values, dtype=result["b"].dtype)\n idx = Index([1, 2], name="a")\n 
expected = DataFrame({"b": values}, index=idx)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("index", [0, -1])\ndef test_nth_with_na_object(index, nulls_fixture):\n # https://github.com/pandas-dev/pandas/issues/32123\n df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]})\n groups = df.groupby("a")\n result = groups.nth(index)\n expected = df.iloc[[0, 2]] if index == 0 else df.iloc[[1, 3]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["first", "last"])\ndef test_first_last_with_None(method):\n # https://github.com/pandas-dev/pandas/issues/32800\n # None should be preserved as object dtype\n df = DataFrame.from_dict({"id": ["a"], "value": [None]})\n groups = df.groupby("id", as_index=False)\n result = getattr(groups, method)()\n\n tm.assert_frame_equal(result, df)\n\n\n@pytest.mark.parametrize("method", ["first", "last"])\n@pytest.mark.parametrize(\n "df, expected",\n [\n (\n DataFrame({"id": "a", "value": [None, "foo", np.nan]}),\n DataFrame({"value": ["foo"]}, index=Index(["a"], name="id")),\n ),\n (\n DataFrame({"id": "a", "value": [np.nan]}, dtype=object),\n DataFrame({"value": [None]}, index=Index(["a"], name="id")),\n ),\n ],\n)\ndef test_first_last_with_None_expanded(method, df, expected):\n # GH 32800, 38286\n result = getattr(df.groupby("id"), method)()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_first_last_nth_dtypes():\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.array(np.random.default_rng(2).standard_normal(8), dtype="float32"),\n }\n )\n df["E"] = True\n df["F"] = 1\n\n # tests for first / last / nth\n grouped = df.groupby("A")\n first = grouped.first()\n expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]]\n expected.index = Index(["bar", "foo"], name="A")\n expected = expected.sort_index()\n 
tm.assert_frame_equal(first, expected)\n\n last = grouped.last()\n expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]]\n expected.index = Index(["bar", "foo"], name="A")\n expected = expected.sort_index()\n tm.assert_frame_equal(last, expected)\n\n nth = grouped.nth(1)\n expected = df.iloc[[2, 3]]\n tm.assert_frame_equal(nth, expected)\n\n\ndef test_first_last_nth_dtypes2():\n # GH 2763, first/last shifting dtypes\n idx = list(range(10))\n idx.append(9)\n ser = Series(data=range(11), index=idx, name="IntCol")\n assert ser.dtype == "int64"\n f = ser.groupby(level=0).first()\n assert f.dtype == "int64"\n\n\ndef test_first_last_nth_nan_dtype():\n # GH 33591\n df = DataFrame({"data": ["A"], "nans": Series([None], dtype=object)})\n grouped = df.groupby("data")\n\n expected = df.set_index("data").nans\n tm.assert_series_equal(grouped.nans.first(), expected)\n tm.assert_series_equal(grouped.nans.last(), expected)\n\n expected = df.nans\n tm.assert_series_equal(grouped.nans.nth(-1), expected)\n tm.assert_series_equal(grouped.nans.nth(0), expected)\n\n\ndef test_first_strings_timestamps():\n # GH 11244\n test = DataFrame(\n {\n Timestamp("2012-01-01 00:00:00"): ["a", "b"],\n Timestamp("2012-01-02 00:00:00"): ["c", "d"],\n "name": ["e", "e"],\n "aaaa": ["f", "g"],\n }\n )\n result = test.groupby("name").first()\n expected = DataFrame(\n [["a", "c", "f"]],\n columns=Index([Timestamp("2012-01-01"), Timestamp("2012-01-02"), "aaaa"]),\n index=Index(["e"], name="name"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nth():\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])\n gb = df.groupby("A")\n\n tm.assert_frame_equal(gb.nth(0), df.iloc[[0, 2]])\n tm.assert_frame_equal(gb.nth(1), df.iloc[[1]])\n tm.assert_frame_equal(gb.nth(2), df.loc[[]])\n tm.assert_frame_equal(gb.nth(-1), df.iloc[[1, 2]])\n tm.assert_frame_equal(gb.nth(-2), df.iloc[[0]])\n tm.assert_frame_equal(gb.nth(-3), df.loc[[]])\n tm.assert_series_equal(gb.B.nth(0), df.B.iloc[[0, 2]])\n 
tm.assert_series_equal(gb.B.nth(1), df.B.iloc[[1]])\n tm.assert_frame_equal(gb[["B"]].nth(0), df[["B"]].iloc[[0, 2]])\n\n tm.assert_frame_equal(gb.nth(0, dropna="any"), df.iloc[[1, 2]])\n tm.assert_frame_equal(gb.nth(-1, dropna="any"), df.iloc[[1, 2]])\n\n tm.assert_frame_equal(gb.nth(7, dropna="any"), df.iloc[:0])\n tm.assert_frame_equal(gb.nth(2, dropna="any"), df.iloc[:0])\n\n\ndef test_nth2():\n # out of bounds, regression from 0.13.1\n # GH 6621\n df = DataFrame(\n {\n "color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"},\n "food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"},\n "two": {\n 0: 1.5456590000000001,\n 1: -0.070345000000000005,\n 2: -2.4004539999999999,\n 3: 0.46206000000000003,\n 4: 0.52350799999999997,\n },\n "one": {\n 0: 0.56573799999999996,\n 1: -0.9742360000000001,\n 2: 1.033801,\n 3: -0.78543499999999999,\n 4: 0.70422799999999997,\n },\n }\n ).set_index(["color", "food"])\n\n result = df.groupby(level=0, as_index=False).nth(2)\n expected = df.iloc[[-1]]\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(level=0, as_index=False).nth(3)\n expected = df.loc[[]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nth3():\n # GH 7559\n # from the vbench\n df = DataFrame(np.random.default_rng(2).integers(1, 10, (100, 2)), dtype="int64")\n ser = df[1]\n gb = df[0]\n expected = ser.groupby(gb).first()\n expected2 = ser.groupby(gb).apply(lambda x: x.iloc[0])\n tm.assert_series_equal(expected2, expected, check_names=False)\n assert expected.name == 1\n assert expected2.name == 1\n\n # validate first\n v = ser[gb == 1].iloc[0]\n assert expected.iloc[0] == v\n assert expected2.iloc[0] == v\n\n with pytest.raises(ValueError, match="For a DataFrame"):\n ser.groupby(gb, sort=False).nth(0, dropna=True)\n\n\ndef test_nth4():\n # doc example\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])\n gb = df.groupby("A")\n result = gb.B.nth(0, dropna="all")\n expected = df.B.iloc[[1, 2]]\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_nth5():\n # test multiple nth values\n df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"])\n gb = df.groupby("A")\n\n tm.assert_frame_equal(gb.nth(0), df.iloc[[0, 3]])\n tm.assert_frame_equal(gb.nth([0]), df.iloc[[0, 3]])\n tm.assert_frame_equal(gb.nth([0, 1]), df.iloc[[0, 1, 3, 4]])\n tm.assert_frame_equal(gb.nth([0, -1]), df.iloc[[0, 2, 3, 4]])\n tm.assert_frame_equal(gb.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]])\n tm.assert_frame_equal(gb.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]])\n tm.assert_frame_equal(gb.nth([2]), df.iloc[[2]])\n tm.assert_frame_equal(gb.nth([3, 4]), df.loc[[]])\n\n\ndef test_nth_bdays(unit):\n business_dates = pd.date_range(\n start="4/1/2014", end="6/30/2014", freq="B", unit=unit\n )\n df = DataFrame(1, index=business_dates, columns=["a", "b"])\n # get the first, fourth and last two business days for each month\n key = [df.index.year, df.index.month]\n result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])\n expected_dates = pd.to_datetime(\n [\n "2014/4/1",\n "2014/4/4",\n "2014/4/29",\n "2014/4/30",\n "2014/5/1",\n "2014/5/6",\n "2014/5/29",\n "2014/5/30",\n "2014/6/2",\n "2014/6/5",\n "2014/6/27",\n "2014/6/30",\n ]\n ).as_unit(unit)\n expected = DataFrame(1, columns=["a", "b"], index=expected_dates)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nth_multi_grouper(three_group):\n # PR 9090, related to issue 8979\n # test nth on multiple groupers\n grouped = three_group.groupby(["A", "B"])\n result = grouped.nth(0)\n expected = three_group.iloc[[0, 3, 4, 7]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, expected_first, expected_last",\n [\n (\n {\n "id": ["A"],\n "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),\n "foo": [1],\n },\n {\n "id": ["A"],\n "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),\n "foo": [1],\n },\n {\n "id": ["A"],\n "time": Timestamp("2012-02-01 14:00:00", 
tz="US/Central"),\n "foo": [1],\n },\n ),\n (\n {\n "id": ["A", "B", "A"],\n "time": [\n Timestamp("2012-01-01 13:00:00", tz="America/New_York"),\n Timestamp("2012-02-01 14:00:00", tz="US/Central"),\n Timestamp("2012-03-01 12:00:00", tz="Europe/London"),\n ],\n "foo": [1, 2, 3],\n },\n {\n "id": ["A", "B"],\n "time": [\n Timestamp("2012-01-01 13:00:00", tz="America/New_York"),\n Timestamp("2012-02-01 14:00:00", tz="US/Central"),\n ],\n "foo": [1, 2],\n },\n {\n "id": ["A", "B"],\n "time": [\n Timestamp("2012-03-01 12:00:00", tz="Europe/London"),\n Timestamp("2012-02-01 14:00:00", tz="US/Central"),\n ],\n "foo": [3, 2],\n },\n ),\n ],\n)\ndef test_first_last_tz(data, expected_first, expected_last):\n # GH15884\n # Test that the timezone is retained when calling first\n # or last on groupby with as_index=False\n\n df = DataFrame(data)\n\n result = df.groupby("id", as_index=False).first()\n expected = DataFrame(expected_first)\n cols = ["id", "time", "foo"]\n tm.assert_frame_equal(result[cols], expected[cols])\n\n result = df.groupby("id", as_index=False)["time"].first()\n tm.assert_frame_equal(result, expected[["id", "time"]])\n\n result = df.groupby("id", as_index=False).last()\n expected = DataFrame(expected_last)\n cols = ["id", "time", "foo"]\n tm.assert_frame_equal(result[cols], expected[cols])\n\n result = df.groupby("id", as_index=False)["time"].last()\n tm.assert_frame_equal(result, expected[["id", "time"]])\n\n\n@pytest.mark.parametrize(\n "method, ts, alpha",\n [\n ["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"],\n ["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"],\n ],\n)\ndef test_first_last_tz_multi_column(method, ts, alpha, unit):\n # GH 21603\n category_string = Series(list("abc")).astype("category")\n dti = pd.date_range("20130101", periods=3, tz="US/Eastern", unit=unit)\n df = DataFrame(\n {\n "group": [1, 1, 2],\n "category_string": category_string,\n "datetimetz": dti,\n }\n )\n result = getattr(df.groupby("group"), method)()\n 
expected = DataFrame(\n {\n "category_string": pd.Categorical(\n [alpha, "c"], dtype=category_string.dtype\n ),\n "datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")],\n },\n index=Index([1, 2], name="group"),\n )\n expected["datetimetz"] = expected["datetimetz"].dt.as_unit(unit)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "values",\n [\n pd.array([True, False], dtype="boolean"),\n pd.array([1, 2], dtype="Int64"),\n pd.to_datetime(["2020-01-01", "2020-02-01"]),\n pd.to_timedelta([1, 2], unit="D"),\n ],\n)\n@pytest.mark.parametrize("function", ["first", "last", "min", "max"])\ndef test_first_last_extension_array_keeps_dtype(values, function):\n # https://github.com/pandas-dev/pandas/issues/33071\n # https://github.com/pandas-dev/pandas/issues/32194\n df = DataFrame({"a": [1, 2], "b": values})\n grouped = df.groupby("a")\n idx = Index([1, 2], name="a")\n expected_series = Series(values, name="b", index=idx)\n expected_frame = DataFrame({"b": values}, index=idx)\n\n result_series = getattr(grouped["b"], function)()\n tm.assert_series_equal(result_series, expected_series)\n\n result_frame = grouped.agg({"b": function})\n tm.assert_frame_equal(result_frame, expected_frame)\n\n\ndef test_nth_multi_index_as_expected():\n # PR 9090, related to issue 8979\n # test nth on MultiIndex\n three_group = DataFrame(\n {\n "A": [\n "foo",\n "foo",\n "foo",\n "foo",\n "bar",\n "bar",\n "bar",\n "bar",\n "foo",\n "foo",\n "foo",\n ],\n "B": [\n "one",\n "one",\n "one",\n "two",\n "one",\n "one",\n "one",\n "two",\n "two",\n "two",\n "one",\n ],\n "C": [\n "dull",\n "dull",\n "shiny",\n "dull",\n "dull",\n "shiny",\n "shiny",\n "dull",\n "shiny",\n "shiny",\n "shiny",\n ],\n }\n )\n grouped = three_group.groupby(["A", "B"])\n result = grouped.nth(0)\n expected = three_group.iloc[[0, 3, 4, 7]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op, n, expected_rows",\n [\n ("head", -1, [0]),\n ("head", 0, []),\n ("head", 1, 
[0, 2]),\n ("head", 7, [0, 1, 2]),\n ("tail", -1, [1]),\n ("tail", 0, []),\n ("tail", 1, [1, 2]),\n ("tail", 7, [0, 1, 2]),\n ],\n)\n@pytest.mark.parametrize("columns", [None, [], ["A"], ["B"], ["A", "B"]])\n@pytest.mark.parametrize("as_index", [True, False])\ndef test_groupby_head_tail(op, n, expected_rows, columns, as_index):\n df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])\n g = df.groupby("A", as_index=as_index)\n expected = df.iloc[expected_rows]\n if columns is not None:\n g = g[columns]\n expected = expected[columns]\n result = getattr(g, op)(n)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "op, n, expected_cols",\n [\n ("head", -1, [0]),\n ("head", 0, []),\n ("head", 1, [0, 2]),\n ("head", 7, [0, 1, 2]),\n ("tail", -1, [1]),\n ("tail", 0, []),\n ("tail", 1, [1, 2]),\n ("tail", 7, [0, 1, 2]),\n ],\n)\ndef test_groupby_head_tail_axis_1(op, n, expected_cols):\n # GH 9772\n df = DataFrame(\n [[1, 2, 3], [1, 4, 5], [2, 6, 7], [3, 8, 9]], columns=["A", "B", "C"]\n )\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n g = df.groupby([0, 0, 1], axis=1)\n expected = df.iloc[:, expected_cols]\n result = getattr(g, op)(n)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_group_selection_cache():\n # GH 12839 nth, head, and tail should return same result consistently\n df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])\n expected = df.iloc[[0, 2]]\n\n g = df.groupby("A")\n result1 = g.head(n=2)\n result2 = g.nth(0)\n tm.assert_frame_equal(result1, df)\n tm.assert_frame_equal(result2, expected)\n\n g = df.groupby("A")\n result1 = g.tail(n=2)\n result2 = g.nth(0)\n tm.assert_frame_equal(result1, df)\n tm.assert_frame_equal(result2, expected)\n\n g = df.groupby("A")\n result1 = g.nth(0)\n result2 = g.head(n=2)\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, df)\n\n g = df.groupby("A")\n result1 = g.nth(0)\n result2 = 
g.tail(n=2)\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, df)\n\n\ndef test_nth_empty():\n # GH 16064\n df = DataFrame(index=[0], columns=["a", "b", "c"])\n result = df.groupby("a").nth(10)\n expected = df.iloc[:0]\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(["a", "b"]).nth(10)\n expected = df.iloc[:0]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nth_column_order():\n # GH 20760\n # Check that nth preserves column order\n df = DataFrame(\n [[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]],\n columns=["A", "C", "B"],\n )\n result = df.groupby("A").nth(0)\n expected = df.iloc[[0, 3]]\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby("A").nth(-1, dropna="any")\n expected = df.iloc[[1, 4]]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dropna", [None, "any", "all"])\ndef test_nth_nan_in_grouper(dropna):\n # GH 26011\n df = DataFrame(\n {\n "a": [np.nan, "a", np.nan, "b", np.nan],\n "b": [0, 2, 4, 6, 8],\n "c": [1, 3, 5, 7, 9],\n }\n )\n result = df.groupby("a").nth(0, dropna=dropna)\n expected = df.iloc[[1, 3]]\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dropna", [None, "any", "all"])\ndef test_nth_nan_in_grouper_series(dropna):\n # GH 26454\n df = DataFrame(\n {\n "a": [np.nan, "a", np.nan, "b", np.nan],\n "b": [0, 2, 4, 6, 8],\n }\n )\n result = df.groupby("a")["b"].nth(0, dropna=dropna)\n expected = df["b"].iloc[[1, 3]]\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_first_categorical_and_datetime_data_nat():\n # GH 20520\n df = DataFrame(\n {\n "group": ["first", "first", "second", "third", "third"],\n "time": 5 * [np.datetime64("NaT")],\n "categories": Series(["a", "b", "c", "a", "b"], dtype="category"),\n }\n )\n result = df.groupby("group").first()\n expected = DataFrame(\n {\n "time": 3 * [np.datetime64("NaT")],\n "categories": Series(["a", "c", "a"]).astype(\n pd.CategoricalDtype(["a", 
"b", "c"])\n ),\n }\n )\n expected.index = Index(["first", "second", "third"], name="group")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_first_multi_key_groupby_categorical():\n # GH 22512\n df = DataFrame(\n {\n "A": [1, 1, 1, 2, 2],\n "B": [100, 100, 200, 100, 100],\n "C": ["apple", "orange", "mango", "mango", "orange"],\n "D": ["jupiter", "mercury", "mars", "venus", "venus"],\n }\n )\n df = df.astype({"D": "category"})\n result = df.groupby(by=["A", "B"]).first()\n expected = DataFrame(\n {\n "C": ["apple", "mango", "mango"],\n "D": Series(["jupiter", "mars", "venus"]).astype(\n pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"])\n ),\n }\n )\n expected.index = MultiIndex.from_tuples(\n [(1, 100), (1, 200), (2, 100)], names=["A", "B"]\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["first", "last", "nth"])\ndef test_groupby_last_first_nth_with_none(method, nulls_fixture):\n # GH29645\n expected = Series(["y"], dtype=object)\n data = Series(\n [nulls_fixture, nulls_fixture, nulls_fixture, "y", nulls_fixture],\n index=[0, 0, 0, 0, 0],\n dtype=object,\n ).groupby(level=0)\n\n if method == "nth":\n result = getattr(data, method)(3)\n else:\n result = getattr(data, method)()\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "arg, expected_rows",\n [\n [slice(None, 3, 2), [0, 1, 4, 5]],\n [slice(None, -2), [0, 2, 5]],\n [[slice(None, 2), slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]],\n [[0, 1, slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]],\n ],\n)\ndef test_slice(slice_test_df, slice_test_grouped, arg, expected_rows):\n # Test slices GH #42947\n\n result = slice_test_grouped.nth[arg]\n equivalent = slice_test_grouped.nth(arg)\n expected = slice_test_df.iloc[expected_rows]\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(equivalent, expected)\n\n\ndef test_nth_indexed(slice_test_df, slice_test_grouped):\n # Test index notation GH #44688\n\n result = 
slice_test_grouped.nth[0, 1, -2:]\n equivalent = slice_test_grouped.nth([0, 1, slice(-2, None)])\n expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(equivalent, expected)\n\n\ndef test_invalid_argument(slice_test_grouped):\n # Test for error on invalid argument\n\n with pytest.raises(TypeError, match="Invalid index"):\n slice_test_grouped.nth(3.14)\n\n\ndef test_negative_step(slice_test_grouped):\n # Test for error on negative slice step\n\n with pytest.raises(ValueError, match="Invalid step"):\n slice_test_grouped.nth(slice(None, None, -1))\n\n\ndef test_np_ints(slice_test_df, slice_test_grouped):\n # Test np ints work\n\n result = slice_test_grouped.nth(np.array([0, 1]))\n expected = slice_test_df.iloc[[0, 1, 2, 3, 4]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_nth_with_column_axis():\n # GH43926\n df = DataFrame(\n [\n [4, 5, 6],\n [8, 8, 7],\n ],\n index=["z", "y"],\n columns=["C", "B", "A"],\n )\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(df.iloc[1], axis=1)\n result = gb.nth(0)\n expected = df.iloc[:, [0, 2]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_nth_interval():\n # GH#24205\n idx_result = MultiIndex(\n [\n pd.CategoricalIndex([pd.Interval(0, 1), pd.Interval(1, 2)]),\n pd.CategoricalIndex([pd.Interval(0, 10), pd.Interval(10, 20)]),\n ],\n [[0, 0, 0, 1, 1], [0, 1, 1, 0, -1]],\n )\n df_result = DataFrame({"col": range(len(idx_result))}, index=idx_result)\n result = df_result.groupby(level=[0, 1], observed=False).nth(0)\n val_expected = [0, 1, 3]\n idx_expected = MultiIndex(\n [\n pd.CategoricalIndex([pd.Interval(0, 1), pd.Interval(1, 2)]),\n pd.CategoricalIndex([pd.Interval(0, 10), pd.Interval(10, 20)]),\n ],\n [[0, 0, 1], [0, 1, 0]],\n )\n expected = DataFrame(val_expected, index=idx_expected, columns=["col"])\n tm.assert_frame_equal(result, 
expected)\n\n\n@pytest.mark.parametrize(\n "start, stop, expected_values, expected_columns",\n [\n (None, None, [0, 1, 2, 3, 4], list("ABCDE")),\n (None, 1, [0, 3], list("AD")),\n (None, 9, [0, 1, 2, 3, 4], list("ABCDE")),\n (None, -1, [0, 1, 3], list("ABD")),\n (1, None, [1, 2, 4], list("BCE")),\n (1, -1, [1], list("B")),\n (-1, None, [2, 4], list("CE")),\n (-1, 2, [4], list("E")),\n ],\n)\n@pytest.mark.parametrize("method", ["call", "index"])\ndef test_nth_slices_with_column_axis(\n start, stop, expected_values, expected_columns, method\n):\n df = DataFrame([range(5)], columns=[list("ABCDE")])\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby([5, 5, 5, 6, 6], axis=1)\n result = {\n "call": lambda start, stop: gb.nth(slice(start, stop)),\n "index": lambda start, stop: gb.nth[start:stop],\n }[method](start, stop)\n expected = DataFrame([expected_values], columns=[expected_columns])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.filterwarnings(\n "ignore:invalid value encountered in remainder:RuntimeWarning"\n)\ndef test_head_tail_dropna_true():\n # GH#45089\n df = DataFrame(\n [["a", "z"], ["b", np.nan], ["c", np.nan], ["c", np.nan]], columns=["X", "Y"]\n )\n expected = DataFrame([["a", "z"]], columns=["X", "Y"])\n\n result = df.groupby(["X", "Y"]).head(n=1)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(["X", "Y"]).tail(n=1)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(["X", "Y"]).nth(n=0)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_head_tail_dropna_false():\n # GH#45089\n df = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"])\n expected = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"])\n\n result = df.groupby(["X", "Y"], dropna=False).head(n=1)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(["X", "Y"], dropna=False).tail(n=1)\n 
tm.assert_frame_equal(result, expected)\n\n result = df.groupby(["X", "Y"], dropna=False).nth(n=0)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("selection", ("b", ["b"], ["b", "c"]))\n@pytest.mark.parametrize("dropna", ["any", "all", None])\ndef test_nth_after_selection(selection, dropna):\n # GH#11038, GH#53518\n df = DataFrame(\n {\n "a": [1, 1, 2],\n "b": [np.nan, 3, 4],\n "c": [5, 6, 7],\n }\n )\n gb = df.groupby("a")[selection]\n result = gb.nth(0, dropna=dropna)\n if dropna == "any" or (dropna == "all" and selection != ["b", "c"]):\n locs = [1, 2]\n else:\n locs = [0, 2]\n expected = df.loc[locs, selection]\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data",\n [\n (\n Timestamp("2011-01-15 12:50:28.502376"),\n Timestamp("2011-01-20 12:50:28.593448"),\n ),\n (24650000000000001, 24650000000000002),\n ],\n)\ndef test_groupby_nth_int_like_precision(data):\n # GH#6620, GH#9311\n df = DataFrame({"a": [1, 1], "b": data})\n\n grouped = df.groupby("a")\n result = grouped.nth(0)\n expected = DataFrame({"a": 1, "b": [data[0]]})\n\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_nth.py | test_nth.py | Python | 28,225 | 0.95 | 0.060738 | 0.065385 | node-utils | 338 | 2025-02-07T13:23:48.184164 | MIT | true | 83e90058da39fdb905142d279fb94eb1 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]\n)\n@pytest.mark.parametrize(\n "a_vals,b_vals",\n [\n # Ints\n ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]),\n ([1, 2, 3, 4], [4, 3, 2, 1]),\n ([1, 2, 3, 4, 5], [4, 3, 2, 1]),\n # Floats\n ([1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]),\n # Missing data\n ([1.0, np.nan, 3.0, np.nan, 5.0], [5.0, np.nan, 3.0, np.nan, 1.0]),\n ([np.nan, 4.0, np.nan, 2.0, np.nan], [np.nan, 4.0, np.nan, 2.0, np.nan]),\n # Timestamps\n (\n pd.date_range("1/1/18", freq="D", periods=5),\n pd.date_range("1/1/18", freq="D", periods=5)[::-1],\n ),\n (\n pd.date_range("1/1/18", freq="D", periods=5).as_unit("s"),\n pd.date_range("1/1/18", freq="D", periods=5)[::-1].as_unit("s"),\n ),\n # All NA\n ([np.nan] * 5, [np.nan] * 5),\n ],\n)\n@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])\ndef test_quantile(interpolation, a_vals, b_vals, q, request):\n if (\n interpolation == "nearest"\n and q == 0.5\n and isinstance(b_vals, list)\n and b_vals == [4, 3, 2, 1]\n ):\n request.applymarker(\n pytest.mark.xfail(\n reason="Unclear numpy expectation for nearest "\n "result with equidistant data"\n )\n )\n all_vals = pd.concat([pd.Series(a_vals), pd.Series(b_vals)])\n\n a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)\n b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)\n\n df = DataFrame({"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": all_vals})\n\n expected = DataFrame(\n [a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key")\n )\n if all_vals.dtype.kind == "M" and expected.dtypes.values[0].kind == "M":\n # TODO(non-nano): this should be unnecessary once array_to_datetime\n # correctly infers non-nano from Timestamp.unit\n expected = expected.astype(all_vals.dtype)\n result = 
df.groupby("key").quantile(q, interpolation=interpolation)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_quantile_array():\n # https://github.com/pandas-dev/pandas/issues/27526\n df = DataFrame({"A": [0, 1, 2, 3, 4]})\n key = np.array([0, 0, 1, 1, 1], dtype=np.int64)\n result = df.groupby(key).quantile([0.25])\n\n index = pd.MultiIndex.from_product([[0, 1], [0.25]])\n expected = DataFrame({"A": [0.25, 2.50]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})\n index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]])\n\n key = np.array([0, 0, 1, 1], dtype=np.int64)\n result = df.groupby(key).quantile([0.25, 0.75])\n expected = DataFrame(\n {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_quantile_array2():\n # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959\n arr = np.random.default_rng(2).integers(0, 5, size=(10, 3), dtype=np.int64)\n df = DataFrame(arr, columns=list("ABC"))\n result = df.groupby("A").quantile([0.3, 0.7])\n expected = DataFrame(\n {\n "B": [2.0, 2.0, 2.3, 2.7, 0.3, 0.7, 3.2, 4.0, 0.3, 0.7],\n "C": [1.0, 1.0, 1.9, 3.0999999999999996, 0.3, 0.7, 2.6, 3.0, 1.2, 2.8],\n },\n index=pd.MultiIndex.from_product(\n [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_quantile_array_no_sort():\n df = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]})\n key = np.array([1, 0, 1], dtype=np.int64)\n result = df.groupby(key, sort=False).quantile([0.25, 0.5, 0.75])\n expected = DataFrame(\n {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]},\n index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]),\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(key, sort=False).quantile([0.75, 0.25])\n expected = DataFrame(\n {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]},\n 
index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_quantile_array_multiple_levels():\n df = DataFrame(\n {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]}\n )\n result = df.groupby(["c", "d"]).quantile([0.25, 0.75])\n index = pd.MultiIndex.from_tuples(\n [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)],\n names=["c", "d", None],\n )\n expected = DataFrame(\n {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("frame_size", [(2, 3), (100, 10)])\n@pytest.mark.parametrize("groupby", [[0], [0, 1]])\n@pytest.mark.parametrize("q", [[0.5, 0.6]])\ndef test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q):\n # GH30289\n nrow, ncol = frame_size\n df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol))\n\n idx_levels = [np.arange(min(nrow, 4))] * len(groupby) + [q]\n idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [\n list(range(len(q))) * min(nrow, 4)\n ]\n expected_index = pd.MultiIndex(\n levels=idx_levels, codes=idx_codes, names=groupby + [None]\n )\n expected_values = [\n [float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q\n ]\n expected_columns = [x for x in range(ncol) if x not in groupby]\n expected = DataFrame(\n expected_values, index=expected_index, columns=expected_columns\n )\n result = df.groupby(groupby).quantile(q)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_quantile_raises():\n df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"])\n\n msg = "dtype '(object|str)' does not support operation 'quantile'"\n with pytest.raises(TypeError, match=msg):\n df.groupby("key").quantile()\n\n\ndef test_quantile_out_of_bounds_q_raises():\n # https://github.com/pandas-dev/pandas/issues/27470\n df = DataFrame({"a": [0, 0, 
# NOTE(review): this chunk opens mid-function — the fragment below is the tail
# of a quantile-argument-validation test whose header lies outside this view.
# It is kept verbatim; only comments are added in this block.
        0, 1, 1, 1], "b": range(6)})
    g = df.groupby([0, 0, 0, 1, 1, 1])
    # q must lie within [0, 1]; both out-of-range values should raise ValueError.
    with pytest.raises(ValueError, match="Got '50.0' instead"):
        g.quantile(50)

    with pytest.raises(ValueError, match="Got '-1.0' instead"):
        g.quantile(-1)


def test_quantile_missing_group_values_no_segfaults():
    """Repeated quantile() with a NaN grouping key must not segfault (GH 28662)."""
    # GH 28662
    data = np.array([1.0, np.nan, 1.0])
    df = DataFrame({"key": data, "val": range(3)})

    # Random segfaults; would have been guaranteed in loop
    grp = df.groupby("key")
    for _ in range(100):
        grp.quantile()


@pytest.mark.parametrize(
    "key, val, expected_key, expected_val",
    [
        ([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]),
        ([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]),
        (["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]),
        ([0], [42], [0], [42.0]),
        ([], [], np.array([], dtype="float64"), np.array([], dtype="float64")),
    ],
)
def test_quantile_missing_group_values_correct_results(
    key, val, expected_key, expected_val
):
    """NaN group keys are dropped and remaining groups produce correct quantiles."""
    # GH 28662, GH 33200, GH 33569
    df = DataFrame({"key": key, "val": val})

    expected = DataFrame(
        expected_val, index=Index(expected_key, name="key"), columns=["val"]
    )

    grp = df.groupby("key")

    # Explicit q=0.5 and the default q must agree.
    result = grp.quantile(0.5)
    tm.assert_frame_equal(result, expected)

    result = grp.quantile()
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "values",
    [
        pd.array([1, 0, None] * 2, dtype="Int64"),
        pd.array([True, False, None] * 2, dtype="boolean"),
    ],
)
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
def test_groupby_quantile_nullable_array(values, q):
    """Masked (nullable) arrays return Float64 quantiles for scalar and list q."""
    # https://github.com/pandas-dev/pandas/issues/33136
    df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values})
    result = df.groupby("a")["b"].quantile(q)

    if isinstance(q, list):
        # list-of-q produces a (group, q) MultiIndex
        idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
        true_quantiles = [0.0, 0.5, 1.0]
    else:
        idx = Index(["x", "y"], name="a")
        true_quantiles = [0.5]

    expected = pd.Series(true_quantiles * 2, index=idx, name="b", dtype="Float64")
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_groupby_quantile_raises_on_invalid_dtype(q, numeric_only):
    """numeric_only=True drops the object column; False raises TypeError."""
    df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
    if numeric_only:
        result = df.groupby("a").quantile(q, numeric_only=numeric_only)
        expected = df.groupby("a")[["b"]].quantile(q)
        tm.assert_frame_equal(result, expected)
    else:
        msg = "dtype '.*' does not support operation 'quantile'"
        with pytest.raises(TypeError, match=msg):
            df.groupby("a").quantile(q, numeric_only=numeric_only)


def test_groupby_quantile_NA_float(any_float_dtype):
    # GH#42849 — any_float_dtype is a conftest fixture covering numpy and
    # masked float dtypes; masked inputs must keep their masked dtype.
    df = DataFrame({"x": [1, 1], "y": [0.2, np.nan]}, dtype=any_float_dtype)
    result = df.groupby("x")["y"].quantile(0.5)
    exp_index = Index([1.0], dtype=any_float_dtype, name="x")

    if any_float_dtype in ["Float32", "Float64"]:
        expected_dtype = any_float_dtype
    else:
        # numpy float dtypes: let construction infer the dtype
        expected_dtype = None

    expected = pd.Series([0.2], dtype=expected_dtype, index=exp_index, name="y")
    tm.assert_series_equal(result, expected)

    result = df.groupby("x")["y"].quantile([0.5, 0.75])
    expected = pd.Series(
        [0.2] * 2,
        index=pd.MultiIndex.from_product((exp_index, [0.5, 0.75]), names=["x", None]),
        name="y",
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)


def test_groupby_quantile_NA_int(any_int_ea_dtype):
    # GH#42849 — integer EA input upcasts to Float64 for quantile results.
    df = DataFrame({"x": [1, 1], "y": [2, 5]}, dtype=any_int_ea_dtype)
    result = df.groupby("x")["y"].quantile(0.5)
    expected = pd.Series(
        [3.5],
        dtype="Float64",
        index=Index([1], name="x", dtype=any_int_ea_dtype),
        name="y",
    )
    tm.assert_series_equal(expected, result)

    result = df.groupby("x").quantile(0.5)
    expected = DataFrame(
        {"y": 3.5}, dtype="Float64", index=Index([1], name="x", dtype=any_int_ea_dtype)
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "interpolation, val1, val2", [("lower", 2, 2), ("higher", 2, 3), ("nearest", 2, 2)]
)
def test_groupby_quantile_all_na_group_masked(
    interpolation, val1, val2, any_numeric_ea_dtype
):
    # GH#37493 — a group that is entirely NA yields NA, and exact-value
    # interpolations keep the original masked dtype.
    df = DataFrame(
        {"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype
    )
    result = df.groupby("a").quantile(q=[0.5, 0.7], interpolation=interpolation)
    expected = DataFrame(
        {"b": [val1, val2, pd.NA, pd.NA]},
        dtype=any_numeric_ea_dtype,
        index=pd.MultiIndex.from_arrays(
            [pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype), [0.5, 0.7, 0.5, 0.7]],
            names=["a", None],
        ),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("interpolation", ["midpoint", "linear"])
def test_groupby_quantile_all_na_group_masked_interp(
    interpolation, any_numeric_ea_dtype
):
    # GH#37493 — interpolating methods produce floats, so the result dtype is
    # Float64 except when the input is already Float32.
    df = DataFrame(
        {"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype
    )
    result = df.groupby("a").quantile(q=[0.5, 0.75], interpolation=interpolation)

    if any_numeric_ea_dtype == "Float32":
        expected_dtype = any_numeric_ea_dtype
    else:
        expected_dtype = "Float64"

    expected = DataFrame(
        {"b": [2.0, 2.5, pd.NA, pd.NA]},
        dtype=expected_dtype,
        index=pd.MultiIndex.from_arrays(
            [
                pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype),
                [0.5, 0.75, 0.5, 0.75],
            ],
            names=["a", None],
        ),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("dtype", ["Float64", "Float32"])
def test_groupby_quantile_allNA_column(dtype):
    # GH#42849 — an all-NA column yields NaN while preserving the masked dtype.
    df = DataFrame({"x": [1, 1], "y": [pd.NA] * 2}, dtype=dtype)
    result = df.groupby("x")["y"].quantile(0.5)
    expected = pd.Series(
        [np.nan], dtype=dtype, index=Index([1.0], dtype=dtype), name="y"
    )
    expected.index.name = "x"
    tm.assert_series_equal(expected, result)


def test_groupby_timedelta_quantile():
    # GH: 29485 — quantile must work on timedelta64 values.
    df = DataFrame(
        {"value": pd.to_timedelta(np.arange(4), unit="s"), "group": [1, 1, 2, 2]}
    )
    result = df.groupby("group").quantile(0.99)
    expected = DataFrame(
        {
            "value": [
                pd.Timedelta("0 days 00:00:00.990000"),
                pd.Timedelta("0 days 00:00:02.990000"),
            ]
        },
        index=Index([1, 2], name="group"),
    )
    tm.assert_frame_equal(result, expected)


def test_columns_groupby_quantile():
    # GH 33795 — groupby(axis=1) is deprecated but must still compute
    # column-wise quantiles; the FutureWarning is asserted explicitly.
    df = DataFrame(
        np.arange(12).reshape(3, -1),
        index=list("XYZ"),
        columns=pd.Series(list("ABAB"), name="col"),
    )
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby("col", axis=1)
    result = gb.quantile(q=[0.8, 0.2])
    expected = DataFrame(
        [
            [1.6, 0.4, 2.6, 1.4],
            [5.6, 4.4, 6.6, 5.4],
            [9.6, 8.4, 10.6, 9.4],
        ],
        index=list("XYZ"),
        columns=pd.MultiIndex.from_tuples(
            [("A", 0.8), ("A", 0.2), ("B", 0.8), ("B", 0.2)], names=["col", None]
        ),
    )

    tm.assert_frame_equal(result, expected)


def test_timestamp_groupby_quantile(unit):
    # GH 33168 — `unit` is a conftest fixture parametrizing the datetime64
    # resolution ("s"/"ms"/"us"/"ns"); quantile on a timestamp-grouped frame.
    dti = pd.date_range(
        start="2020-04-19 00:00:00", freq="1min", periods=100, tz="UTC", unit=unit
    ).floor("1h")
    df = DataFrame(
        {
            "timestamp": dti,
            "category": list(range(1, 101)),
            "value": list(range(101, 201)),
        }
    )

    result = df.groupby("timestamp").quantile([0.2, 0.8])

    # dti[::99] picks the two distinct hourly group keys (rows 0 and 99).
    mi = pd.MultiIndex.from_product([dti[::99], [0.2, 0.8]], names=("timestamp", None))
    expected = DataFrame(
        [
            {"category": 12.8, "value": 112.8},
            {"category": 48.2, "value": 148.2},
            {"category": 68.8, "value": 168.8},
            {"category": 92.2, "value": 192.2},
        ],
        index=mi,
    )

    tm.assert_frame_equal(result, expected)


def test_groupby_quantile_dt64tz_period():
    # GH#51373 — mixed datetime64, tz-aware, Period and timedelta columns,
    # with a trailing all-NaT row, must match the per-group quantile.
    dti = pd.date_range("2016-01-01", periods=1000)
    df = pd.Series(dti).to_frame().copy()
    df[1] = dti.tz_localize("US/Pacific")
    df[2] = dti.to_period("D")
    df[3] = dti - dti[0]
    df.iloc[-1] = pd.NaT

    by = np.tile(np.arange(5), 200)
    gb = df.groupby(by)

    result = gb.quantile(0.5)

    # Check that we match the group-by-group result
    exp = {i: df.iloc[i::5].quantile(0.5) for i in range(5)}
    expected = DataFrame(exp).T.infer_objects()
    expected.index = expected.index.astype(int)

    tm.assert_frame_equal(result, expected)


def test_groupby_quantile_nonmulti_levels_order():
    # Non-regression test for GH #53009
    ind = pd.MultiIndex.from_tuples(
        [
            (0, "a", "B"),
            (0, "a", "A"),
            (0, "b", "B"),
            (0, "b", "A"),
            (1, "a", "B"),
            (1, "a", "A"),
            (1, "b", "B"),
            (1, "b", "A"),
        ],
        names=["sample", "cat0", "cat1"],
    )
    ser = pd.Series(range(8), index=ind)
    result = ser.groupby(level="cat1", sort=False).quantile([0.2, 0.8])

    # sort=False: "B" (first encountered) must precede "A" in the result.
    qind = pd.MultiIndex.from_tuples(
        [("B", 0.2), ("B", 0.8), ("A", 0.2), ("A", 0.8)], names=["cat1", None]
    )
    expected = pd.Series([1.2, 4.8, 2.2, 5.8], index=qind)

    tm.assert_series_equal(result, expected)

    # We need to check that index levels are not sorted
    expected_levels = pd.core.indexes.frozen.FrozenList([["B", "A"], [0.2, 0.8]])
    tm.assert_equal(result.index.levels, expected_levels)
# Tests for DataFrameGroupBy/SeriesGroupBy.rank (part 1 of the module).
from datetime import datetime

import numpy as np
import pytest

import pandas as pd
from pandas import (
    DataFrame,
    NaT,
    Series,
    concat,
)
import pandas._testing as tm


def test_rank_unordered_categorical_typeerror():
    """rank on a non-ordered Categorical grouping must raise TypeError."""
    # GH#51034 should be TypeError, not NotImplementedError
    cat = pd.Categorical([], ordered=False)
    ser = Series(cat)
    df = ser.to_frame()

    msg = "Cannot perform rank with non-ordered Categorical"

    gb = ser.groupby(cat, observed=False)
    with pytest.raises(TypeError, match=msg):
        gb.rank()

    gb2 = df.groupby(cat, observed=False)
    with pytest.raises(TypeError, match=msg):
        gb2.rank()


def test_rank_apply():
    """Grouped rank must match ranking each group independently and concatenating."""
    lev1 = np.array(["a" * 10] * 100, dtype=object)
    lev2 = np.array(["b" * 10] * 130, dtype=object)
    lab1 = np.random.default_rng(2).integers(0, 100, size=500, dtype=int)
    lab2 = np.random.default_rng(2).integers(0, 130, size=500, dtype=int)

    df = DataFrame(
        {
            "value": np.random.default_rng(2).standard_normal(500),
            "key1": lev1.take(lab1),
            "key2": lev2.take(lab2),
        }
    )

    result = df.groupby(["key1", "key2"]).value.rank()

    # reference: rank each group on its own, then re-align to result's order
    expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
    expected = concat(expected, axis=0)
    expected = expected.reindex(result.index)
    tm.assert_series_equal(result, expected)

    result = df.groupby(["key1", "key2"]).value.rank(pct=True)

    expected = [
        piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
    ]
    expected = concat(expected, axis=0)
    expected = expected.reindex(result.index)
    tm.assert_series_equal(result, expected)


# Exhaustive table of expected ranks for values [2, 2, 8, 2, 6] across every
# numeric dtype plus Timestamp / tz-aware / Timedelta / Period, for each
# ties-method x ascending x pct combination.
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
    "vals",
    [
        np.array([2, 2, 8, 2, 6], dtype=dtype)
        for dtype in ["i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "f8", "f4", "f2"]
    ]
    + [
        [
            pd.Timestamp("2018-01-02"),
            pd.Timestamp("2018-01-02"),
            pd.Timestamp("2018-01-08"),
            pd.Timestamp("2018-01-02"),
            pd.Timestamp("2018-01-06"),
        ],
        [
            pd.Timestamp("2018-01-02", tz="US/Pacific"),
            pd.Timestamp("2018-01-02", tz="US/Pacific"),
            pd.Timestamp("2018-01-08", tz="US/Pacific"),
            pd.Timestamp("2018-01-02", tz="US/Pacific"),
            pd.Timestamp("2018-01-06", tz="US/Pacific"),
        ],
        [
            pd.Timestamp("2018-01-02") - pd.Timestamp(0),
            pd.Timestamp("2018-01-02") - pd.Timestamp(0),
            pd.Timestamp("2018-01-08") - pd.Timestamp(0),
            pd.Timestamp("2018-01-02") - pd.Timestamp(0),
            pd.Timestamp("2018-01-06") - pd.Timestamp(0),
        ],
        [
            pd.Timestamp("2018-01-02").to_period("D"),
            pd.Timestamp("2018-01-02").to_period("D"),
            pd.Timestamp("2018-01-08").to_period("D"),
            pd.Timestamp("2018-01-02").to_period("D"),
            pd.Timestamp("2018-01-06").to_period("D"),
        ],
    ],
    ids=lambda x: type(x[0]),
)
@pytest.mark.parametrize(
    "ties_method,ascending,pct,exp",
    [
        ("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
        ("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
        ("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
        ("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
        ("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
        ("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
        ("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
        ("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
        ("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
        ("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
        ("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
        ("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
        ("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
        ("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
        ("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
        ("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
        ("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
        ("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
        ("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
        ("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
    ],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
    """Grouped rank matches the hand-computed table for each method/ascending/pct."""
    key = np.repeat(grps, len(vals))

    orig_vals = vals
    # tile the per-group values once per group, preserving the numpy dtype
    vals = list(vals) * len(grps)
    if isinstance(orig_vals, np.ndarray):
        vals = np.array(vals, dtype=orig_vals.dtype)

    df = DataFrame({"key": key, "val": vals})
    result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)

    exp_df = DataFrame(exp * len(grps), columns=["val"])
    tm.assert_frame_equal(result, exp_df)


# Expected ranks for [-inf, -inf, nan, 1.0, nan, inf, inf]: infinities rank as
# extremes, NaN placement is governed by na_option (GH 20561).
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
    "vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
    "ties_method,ascending,na_option,exp",
    [
        ("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
        ("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
        ("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
        ("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
        ("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
        ("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
        ("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
        ("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
        ("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
        ("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
        ("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
        ("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
        ("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
        ("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
        ("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
        ("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
        ("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
        ("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
        ("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
        ("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
        ("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
        ("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
        ("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
        ("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
        ("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
        ("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
        ("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
        ("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
        ("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
        ("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
    ],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
    # GH 20561
    key = np.repeat(grps, len(vals))
    vals = vals * len(grps)
    df = DataFrame({"key": key, "val": vals})
    result = df.groupby("key").rank(
        method=ties_method, ascending=ascending, na_option=na_option
    )
    exp_df = DataFrame(exp * len(grps), columns=["val"])
    tm.assert_frame_equal(result, exp_df)


# vals for the missing-values table: [2, 2, nan, 8, 2, 6, nan, nan] over float
# dtypes plus Timestamp / tz-aware / Timedelta / Period variants.
# NOTE(review): this parametrize list continues past the end of this span.
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
    "vals",
    [
        np.array([2, 2, np.nan, 8, 2, 6, np.nan, np.nan], dtype=dtype)
        for dtype in ["f8", "f4", "f2"]
    ]
    + [
        [
            pd.Timestamp("2018-01-02"),
            pd.Timestamp("2018-01-02"),
            np.nan,
            pd.Timestamp("2018-01-08"),
            pd.Timestamp("2018-01-02"),
            pd.Timestamp("2018-01-06"),
            np.nan,
            np.nan,
        ],
        [
            pd.Timestamp("2018-01-02", tz="US/Pacific"),
            pd.Timestamp("2018-01-02", tz="US/Pacific"),
            np.nan,
            pd.Timestamp("2018-01-08", tz="US/Pacific"),
            pd.Timestamp("2018-01-02", tz="US/Pacific"),
            pd.Timestamp("2018-01-06", tz="US/Pacific"),
            np.nan,
            np.nan,
        ],
        [
            pd.Timestamp("2018-01-02") - pd.Timestamp(0),
            pd.Timestamp("2018-01-02") - pd.Timestamp(0),
            np.nan,
            pd.Timestamp("2018-01-08") - pd.Timestamp(0),
            pd.Timestamp("2018-01-02") - pd.Timestamp(0),
            pd.Timestamp("2018-01-06") - pd.Timestamp(0),
            np.nan,
            np.nan,
        ],
        [
            pd.Timestamp("2018-01-02").to_period("D"),
            pd.Timestamp("2018-01-02").to_period("D"),
            np.nan,
            pd.Timestamp("2018-01-08").to_period("D"),
            pd.Timestamp("2018-01-02").to_period("D"),
            pd.Timestamp("2018-01-06").to_period("D"),
            np.nan,
            np.nan,
        ],
    ],
    ids=lambda x: type(x[0]),
)
@pytest.mark.parametrize(
    "ties_method,ascending,na_option,pct,exp",
    [
        (
            "average",
            True,
            "keep",
            False,
            [2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
        ),
        (
            "average",
            True,
            "keep",
            True,
            [0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
        ),
        (
            "average",
            False,
            "keep",
            False,
            [4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
        ),
        (
            "average",
            False,
            "keep",
            True,
            [0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
        ),
        ("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
        ("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
        (
            "min",
            False,
            "keep",
            False,
            [3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
        ),
        ("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
        ("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
        ("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
        (
            "max",
            False,
            "keep",
            False,
            [5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
        ),
        ("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
        (
            "first",
            True,
            "keep",
            False,
            [1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
        ),
        (
            "first",
            True,
            "keep",
            True,
            [0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
        ),
        (
            "first",
            False,
            "keep",
            False,
            [3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
        ),
        (
            "first",
            False,
            "keep",
            True,
            [0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
        ),
        (
            "dense",
            True,
            "keep",
            False,
            [1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
        ),
        (
            "dense",
            True,
            "keep",
            True,
            [
                1.0 / 3.0,
                1.0 / 3.0,
                np.nan,
                3.0 / 3.0,
                1.0 / 3.0,
                2.0 / 3.0,
                np.nan,
                np.nan,
            ],
        ),
        (
            "dense",
            False,
            "keep",
            False,
            [3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
        ),
        (
            "dense",
            False,
            "keep",
            True,
            [
                3.0 / 3.0,
                3.0 / 3.0,
                np.nan,
                1.0 / 3.0,
                3.0 / 3.0,
                2.0 / 3.0,
                np.nan,
                np.nan,
            ],
        ),
        ("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
        (
            "average",
            True,
            "bottom",
            True,
            [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
        ),
        ("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
        (
            "average",
            False,
            "bottom",
            True,
            [0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
        ),
        ("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
        (
            "min",
            True,
            "bottom",
            True,
            [0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
        ),
        ("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
        (
            "min",
            False,
            "bottom",
            True,
            [0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
        ),
        ("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
        ("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
        ("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
        (
            "max",
            False,
            "bottom",
            True,
            [0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
        ),
        ("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
        (
            "first",
            True,
            "bottom",
            True,
            [0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
        ),
        ("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
        (
            "first",
            False,
            "bottom",
            True,
            [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
        ),
        ("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
        ("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
        ("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
        ("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
    ],
)
def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
    """Grouped rank with missing values matches the hand-computed table."""
    key = np.repeat(grps, len(vals))

    orig_vals = vals
    # tile per-group values once per group, preserving the numpy dtype
    vals = list(vals) * len(grps)
    if isinstance(orig_vals, np.ndarray):
        vals = np.array(vals, dtype=orig_vals.dtype)

    df = DataFrame({"key": key, "val": vals})
    result = df.groupby("key").rank(
        method=ties_method, ascending=ascending, na_option=na_option, pct=pct
    )

    exp_df = DataFrame(exp * len(grps), columns=["val"])
    tm.assert_frame_equal(result, exp_df)


@pytest.mark.parametrize(
    "pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])]
)
def test_rank_resets_each_group(pct, exp):
    """Ranks restart within each group rather than continuing across groups."""
    df = DataFrame(
        {"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10}
    )
    result = df.groupby("key").rank(pct=pct)
    exp_df = DataFrame(exp * 2, columns=["val"])
    tm.assert_frame_equal(result, exp_df)


@pytest.mark.parametrize(
    "dtype", ["int64", "int32", "uint64", "uint32", "float64", "float32"]
)
@pytest.mark.parametrize("upper", [True, False])
def test_rank_avg_even_vals(dtype, upper):
    """Average rank of four equal values is 2.5; masked dtypes yield Float64."""
    if upper:
        # use IntegerDtype/FloatingDtype
        dtype = dtype[0].upper() + dtype[1:]
        dtype = dtype.replace("Ui", "UI")
    df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
    df["val"] = df["val"].astype(dtype)
    assert df["val"].dtype == dtype

    result = df.groupby("key").rank()
    exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
    if upper:
        exp_df = exp_df.astype("Float64")
    tm.assert_frame_equal(result, exp_df)


# NOTE(review): this parametrize list continues past the end of this span.
@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize(
    "vals", [["bar",
"bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]]\n)\ndef test_rank_object_dtype(ties_method, ascending, na_option, pct, vals):\n df = DataFrame({"key": ["foo"] * 5, "val": vals})\n mask = df["val"].isna()\n\n gb = df.groupby("key")\n res = gb.rank(method=ties_method, ascending=ascending, na_option=na_option, pct=pct)\n\n # construct our expected by using numeric values with the same ordering\n if mask.any():\n df2 = DataFrame({"key": ["foo"] * 5, "val": [0, np.nan, 2, np.nan, 1]})\n else:\n df2 = DataFrame({"key": ["foo"] * 5, "val": [0, 0, 2, 0, 1]})\n\n gb2 = df2.groupby("key")\n alt = gb2.rank(\n method=ties_method, ascending=ascending, na_option=na_option, pct=pct\n )\n\n tm.assert_frame_equal(res, alt)\n\n\n@pytest.mark.parametrize("na_option", [True, "bad", 1])\n@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])\n@pytest.mark.parametrize("ascending", [True, False])\n@pytest.mark.parametrize("pct", [True, False])\n@pytest.mark.parametrize(\n "vals",\n [\n ["bar", "bar", "foo", "bar", "baz"],\n ["bar", np.nan, "foo", np.nan, "baz"],\n [1, np.nan, 2, np.nan, 3],\n ],\n)\ndef test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals):\n df = DataFrame({"key": ["foo"] * 5, "val": vals})\n msg = "na_option must be one of 'keep', 'top', or 'bottom'"\n\n with pytest.raises(ValueError, match=msg):\n df.groupby("key").rank(\n method=ties_method, ascending=ascending, na_option=na_option, pct=pct\n )\n\n\ndef test_rank_empty_group():\n # see gh-22519\n column = "A"\n df = DataFrame({"A": [0, 1, 0], "B": [1.0, np.nan, 2.0]})\n\n result = df.groupby(column).B.rank(pct=True)\n expected = Series([0.5, np.nan, 1.0], name="B")\n tm.assert_series_equal(result, expected)\n\n result = df.groupby(column).rank(pct=True)\n expected = DataFrame({"B": [0.5, np.nan, 1.0]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "input_key,input_value,output_value",\n [\n ([1, 2], [1, 1], [1.0, 1.0]),\n 
([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]),\n ([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]),\n ([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan]),\n ],\n)\ndef test_rank_zero_div(input_key, input_value, output_value):\n # GH 23666\n df = DataFrame({"A": input_key, "B": input_value})\n\n result = df.groupby("A").rank(method="dense", pct=True)\n expected = DataFrame({"B": output_value})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rank_min_int():\n # GH-32859\n df = DataFrame(\n {\n "grp": [1, 1, 2],\n "int_col": [\n np.iinfo(np.int64).min,\n np.iinfo(np.int64).max,\n np.iinfo(np.int64).min,\n ],\n "datetimelike": [NaT, datetime(2001, 1, 1), NaT],\n }\n )\n\n result = df.groupby("grp").rank()\n expected = DataFrame(\n {"int_col": [1.0, 2.0, 1.0], "datetimelike": [np.nan, 1.0, np.nan]}\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("use_nan", [True, False])\ndef test_rank_pct_equal_values_on_group_transition(use_nan):\n # GH#40518\n fill_value = np.nan if use_nan else 3\n df = DataFrame(\n [\n [-1, 1],\n [-1, 2],\n [1, fill_value],\n [-1, fill_value],\n ],\n columns=["group", "val"],\n )\n result = df.groupby(["group"])["val"].rank(\n method="dense",\n pct=True,\n )\n if use_nan:\n expected = Series([0.5, 1, np.nan, np.nan], name="val")\n else:\n expected = Series([1 / 3, 2 / 3, 1, 1], name="val")\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_rank_multiindex():\n # GH27721\n df = concat(\n {\n "a": DataFrame({"col1": [3, 4], "col2": [1, 2]}),\n "b": DataFrame({"col3": [5, 6], "col4": [7, 8]}),\n },\n axis=1,\n )\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(level=0, axis=1)\n msg = "DataFrameGroupBy.rank with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = gb.rank(axis=1)\n\n expected = concat(\n [\n df["a"].rank(axis=1),\n df["b"].rank(axis=1),\n ],\n axis=1,\n 
keys=["a", "b"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_axis0_rank_axis1():\n # GH#41320\n df = DataFrame(\n {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]},\n index=["a", "a", "b", "b"],\n )\n msg = "The 'axis' keyword in DataFrame.groupby is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(level=0, axis=0)\n\n msg = "DataFrameGroupBy.rank with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = gb.rank(axis=1)\n\n # This should match what we get when "manually" operating group-by-group\n expected = concat([df.loc["a"].rank(axis=1), df.loc["b"].rank(axis=1)], axis=0)\n tm.assert_frame_equal(res, expected)\n\n # check that we haven't accidentally written a case that coincidentally\n # matches rank(axis=0)\n msg = "The 'axis' keyword in DataFrameGroupBy.rank"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n alt = gb.rank(axis=0)\n assert not alt.equals(expected)\n\n\ndef test_groupby_axis0_cummax_axis1():\n # case where groupby axis is 0 and axis keyword in transform is 1\n\n # df has mixed dtype -> multiple blocks\n df = DataFrame(\n {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]},\n index=["a", "a", "b", "b"],\n )\n msg = "The 'axis' keyword in DataFrame.groupby is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby(level=0, axis=0)\n\n msg = "DataFrameGroupBy.cummax with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n cmax = gb.cummax(axis=1)\n expected = df[[0, 1]].astype(np.float64)\n expected[2] = expected[1]\n tm.assert_frame_equal(cmax, expected)\n\n\ndef test_non_unique_index():\n # GH 16577\n df = DataFrame(\n {"A": [1.0, 2.0, 3.0, np.nan], "value": 1.0},\n index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,\n )\n result = df.groupby([df.index, "A"]).value.rank(ascending=True, pct=True)\n expected = Series(\n [1.0, 1.0, 1.0, 
np.nan],\n index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,\n name="value",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_rank_categorical():\n cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True)\n cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True)\n\n df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2})\n\n gb = df.groupby("col1")\n\n res = gb.rank()\n\n expected = df.astype(object).groupby("col1").rank()\n tm.assert_frame_equal(res, expected)\n\n\n@pytest.mark.parametrize("na_option", ["top", "bottom"])\ndef test_groupby_op_with_nullables(na_option):\n # GH 54206\n df = DataFrame({"x": [None]}, dtype="Float64")\n result = df.groupby("x", dropna=False)["x"].rank(method="min", na_option=na_option)\n expected = Series([1.0], dtype="Float64", name=result.name)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_rank.py | test_rank.py | Python | 24,263 | 0.95 | 0.041609 | 0.026398 | awesome-app | 673 | 2023-10-22T06:20:20.157925 | GPL-3.0 | true | 17fa5a98ba5eea79a78ae1b1c3bd7f47 |
# Tests for DataFrameGroupBy/SeriesGroupBy.sample.
# NOTE: sampling is random, so most tests assert shape/values against
# `result.index` rather than a fixed index — the result must be computed first.
import pytest

from pandas import (
    DataFrame,
    Index,
    Series,
)
import pandas._testing as tm


@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
    """n=2 and frac=0.2 of 10-per-group both draw two rows from each group."""
    values = [1] * 10 + [2] * 10
    df = DataFrame({"a": values, "b": values})

    result = df.groupby("a").sample(n=n, frac=frac)
    values = [1] * 2 + [2] * 2
    expected = DataFrame({"a": values, "b": values}, index=result.index)
    tm.assert_frame_equal(result, expected)

    result = df.groupby("a")["b"].sample(n=n, frac=frac)
    expected = Series(values, name="b", index=result.index)
    tm.assert_series_equal(result, expected)


def test_groupby_sample_unbalanced_groups_shape():
    """A fixed n draws n rows from each group regardless of group size."""
    values = [1] * 10 + [2] * 20
    df = DataFrame({"a": values, "b": values})

    result = df.groupby("a").sample(n=5)
    values = [1] * 5 + [2] * 5
    expected = DataFrame({"a": values, "b": values}, index=result.index)
    tm.assert_frame_equal(result, expected)

    result = df.groupby("a")["b"].sample(n=5)
    expected = Series(values, name="b", index=result.index)
    tm.assert_series_equal(result, expected)


def test_groupby_sample_index_value_spans_groups():
    """A duplicated index value shared across groups must not confuse sampling."""
    values = [1] * 3 + [2] * 3
    df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])

    result = df.groupby("a").sample(n=2)
    values = [1] * 2 + [2] * 2
    expected = DataFrame({"a": values, "b": values}, index=result.index)
    tm.assert_frame_equal(result, expected)

    result = df.groupby("a")["b"].sample(n=2)
    expected = Series(values, name="b", index=result.index)
    tm.assert_series_equal(result, expected)


def test_groupby_sample_n_and_frac_raises():
    """Passing both n and frac is rejected."""
    df = DataFrame({"a": [1, 2], "b": [1, 2]})
    msg = "Please enter a value for `frac` OR `n`, not both"

    with pytest.raises(ValueError, match=msg):
        df.groupby("a").sample(n=1, frac=1.0)

    with pytest.raises(ValueError, match=msg):
        df.groupby("a")["b"].sample(n=1, frac=1.0)


def test_groupby_sample_frac_gt_one_without_replacement_raises():
    """frac > 1 requires replace=True (upsampling)."""
    df = DataFrame({"a": [1, 2], "b": [1, 2]})
    msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."

    with pytest.raises(ValueError, match=msg):
        df.groupby("a").sample(frac=1.5, replace=False)

    with pytest.raises(ValueError, match=msg):
        df.groupby("a")["b"].sample(frac=1.5, replace=False)


@pytest.mark.parametrize("n", [-1, 1.5])
def test_groupby_sample_invalid_n_raises(n):
    """Negative or non-integer n raises with a value-specific message."""
    df = DataFrame({"a": [1, 2], "b": [1, 2]})

    if n < 0:
        msg = "A negative number of rows requested. Please provide `n` >= 0."
    else:
        msg = "Only integers accepted as `n` values"

    with pytest.raises(ValueError, match=msg):
        df.groupby("a").sample(n=n)

    with pytest.raises(ValueError, match=msg):
        df.groupby("a")["b"].sample(n=n)


def test_groupby_sample_oversample():
    """frac=2.0 with replace=True doubles each group's row count."""
    values = [1] * 10 + [2] * 10
    df = DataFrame({"a": values, "b": values})

    result = df.groupby("a").sample(frac=2.0, replace=True)
    values = [1] * 20 + [2] * 20
    expected = DataFrame({"a": values, "b": values}, index=result.index)
    tm.assert_frame_equal(result, expected)

    result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
    expected = Series(values, name="b", index=result.index)
    tm.assert_series_equal(result, expected)


def test_groupby_sample_without_n_or_frac():
    """With neither n nor frac, one row is drawn per group (n defaults to 1)."""
    values = [1] * 10 + [2] * 10
    df = DataFrame({"a": values, "b": values})

    result = df.groupby("a").sample(n=None, frac=None)
    expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
    tm.assert_frame_equal(result, expected)

    result = df.groupby("a")["b"].sample(n=None, frac=None)
    expected = Series([1, 2], name="b", index=result.index)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "index, expected_index",
    [(["w", "x", "y", "z"], ["w", "w", "y", "y"]), ([3, 4, 5, 6], [3, 3, 5, 5])],
)
def test_groupby_sample_with_weights(index, expected_index):
    # GH 39927 - tests for integer index needed
    # zero weights make the draw deterministic: only rows "w"/3 and "y"/5
    # can ever be selected, so the expected index is fixed.
    values = [1] * 2 + [2] * 2
    df = DataFrame({"a": values, "b": values}, index=Index(index))

    result = df.groupby("a").sample(n=2, replace=True, weights=[1, 0, 1, 0])
    expected = DataFrame({"a": values, "b": values}, index=Index(expected_index))
    tm.assert_frame_equal(result, expected)

    result = df.groupby("a")["b"].sample(n=2, replace=True, weights=[1, 0, 1, 0])
    expected = Series(values, name="b", index=Index(expected_index))
    tm.assert_series_equal(result, expected)


def test_groupby_sample_with_selections():
    # GH 39928 — sampling after a multi-column selection
    values = [1] * 10 + [2] * 10
    df = DataFrame({"a": values, "b": values, "c": values})

    result = df.groupby("a")[["b", "c"]].sample(n=None, frac=None)
    expected = DataFrame({"b": [1, 2], "c": [1, 2]}, index=result.index)
    tm.assert_frame_equal(result, expected)


def test_groupby_sample_with_empty_inputs():
    # GH48459 — sampling an empty frame returns it unchanged
    df = DataFrame({"a": [], "b": []})
    groupby_df = df.groupby("a")

    result = groupby_df.sample()
    expected = df
    tm.assert_frame_equal(result, expected)
import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_integer_dtype\n\nfrom pandas import (\n DataFrame,\n Index,\n PeriodIndex,\n Series,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]])\ndef test_size(df, by):\n grouped = df.groupby(by=by)\n result = grouped.size()\n for key, group in grouped:\n assert result[key] == len(group)\n\n\n@pytest.mark.parametrize(\n "by",\n [\n [0, 0, 0, 0],\n [0, 1, 1, 1],\n [1, 0, 1, 1],\n [0, None, None, None],\n pytest.param([None, None, None, None], marks=pytest.mark.xfail),\n ],\n)\ndef test_size_axis_1(df, axis_1, by, sort, dropna):\n # GH#45715\n counts = {key: sum(value == key for value in by) for key in dict.fromkeys(by)}\n if dropna:\n counts = {key: value for key, value in counts.items() if key is not None}\n expected = Series(counts, dtype="int64")\n if sort:\n expected = expected.sort_index()\n if is_integer_dtype(expected.index.dtype) and not any(x is None for x in by):\n expected.index = expected.index.astype(int)\n\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouped = df.groupby(by=by, axis=axis_1, sort=sort, dropna=dropna)\n result = grouped.size()\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]])\n@pytest.mark.parametrize("sort", [True, False])\ndef test_size_sort(sort, by):\n df = DataFrame(np.random.default_rng(2).choice(20, (1000, 3)), columns=list("ABC"))\n left = df.groupby(by=by, sort=sort).size()\n right = df.groupby(by=by, sort=sort)["C"].apply(lambda a: a.shape[0])\n tm.assert_series_equal(left, right, check_names=False)\n\n\ndef test_size_series_dataframe():\n # https://github.com/pandas-dev/pandas/issues/11699\n df = DataFrame(columns=["A", "B"])\n out = Series(dtype="int64", index=Index([], name="A"))\n tm.assert_series_equal(df.groupby("A").size(), out)\n\n\ndef test_size_groupby_all_null():\n # 
https://github.com/pandas-dev/pandas/issues/23050\n # Assert no 'Value Error : Length of passed values is 2, index implies 0'\n df = DataFrame({"A": [None, None]}) # all-null groups\n result = df.groupby("A").size()\n expected = Series(dtype="int64", index=Index([], name="A"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_size_period_index():\n # https://github.com/pandas-dev/pandas/issues/34010\n ser = Series([1], index=PeriodIndex(["2000"], name="A", freq="D"))\n grp = ser.groupby(level="A")\n result = grp.size()\n tm.assert_series_equal(result, ser)\n\n\n@pytest.mark.parametrize("as_index", [True, False])\ndef test_size_on_categorical(as_index):\n df = DataFrame([[1, 1], [2, 2]], columns=["A", "B"])\n df["A"] = df["A"].astype("category")\n result = df.groupby(["A", "B"], as_index=as_index, observed=False).size()\n\n expected = DataFrame(\n [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"]\n )\n expected["A"] = expected["A"].astype("category")\n if as_index:\n expected = expected.set_index(["A", "B"])["size"].rename(None)\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])\ndef test_size_series_masked_type_returns_Int64(dtype):\n # GH 54132\n ser = Series([1, 1, 1], index=["a", "a", "b"], dtype=dtype)\n result = ser.groupby(level=0).size()\n expected = Series([2, 1], dtype="Int64", index=["a", "b"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_size_strings(any_string_dtype, using_infer_string):\n # GH#55627\n dtype = any_string_dtype\n df = DataFrame({"a": ["a", "a", "b"], "b": "a"}, dtype=dtype)\n result = df.groupby("a")["b"].size()\n exp_dtype = "Int64" if dtype == "string[pyarrow]" else "int64"\n exp_index_dtype = "str" if using_infer_string and dtype == "object" else dtype\n expected = Series(\n [2, 1],\n index=Index(["a", "b"], name="a", dtype=exp_index_dtype),\n name="b",\n dtype=exp_dtype,\n )\n tm.assert_series_equal(result, expected)\n | 
.venv\Lib\site-packages\pandas\tests\groupby\methods\test_size.py | test_size.py | Python | 4,138 | 0.95 | 0.172131 | 0.070707 | react-lib | 921 | 2024-04-21T18:18:41.351582 | GPL-3.0 | true | ffcd758e2705b3cb87ca128bf1a8b948 |
import numpy as np\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef test_groupby_skew_equivalence():\n # Test that that groupby skew method (which uses libgroupby.group_skew)\n # matches the results of operating group-by-group (which uses nanops.nanskew)\n nrows = 1000\n ngroups = 3\n ncols = 2\n nan_frac = 0.05\n\n arr = np.random.default_rng(2).standard_normal((nrows, ncols))\n arr[np.random.default_rng(2).random(nrows) < nan_frac] = np.nan\n\n df = pd.DataFrame(arr)\n grps = np.random.default_rng(2).integers(0, ngroups, size=nrows)\n gb = df.groupby(grps)\n\n result = gb.skew()\n\n grpwise = [grp.skew().to_frame(i).T for i, grp in gb]\n expected = pd.concat(grpwise, axis=0)\n expected.index = expected.index.astype(result.index.dtype) # 32bit builds\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_skew.py | test_skew.py | Python | 841 | 0.95 | 0.074074 | 0.1 | awesome-app | 589 | 2023-07-14T20:40:39.048301 | GPL-3.0 | true | 640b13e0b88b143587e1f54b6a5d3c8e |
"""\nthese are systematically testing all of the args to value_counts\nwith different size combinations. This is to ensure stability of the sorting\nand proper parameter handling\n"""\n\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n Grouper,\n Index,\n MultiIndex,\n Series,\n date_range,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\n\ndef tests_value_counts_index_names_category_column():\n # GH44324 Missing name of index category column\n df = DataFrame(\n {\n "gender": ["female"],\n "country": ["US"],\n }\n )\n df["gender"] = df["gender"].astype("category")\n result = df.groupby("country")["gender"].value_counts()\n\n # Construct expected, very specific multiindex\n df_mi_expected = DataFrame([["US", "female"]], columns=["country", "gender"])\n df_mi_expected["gender"] = df_mi_expected["gender"].astype("category")\n mi_expected = MultiIndex.from_frame(df_mi_expected)\n expected = Series([1], index=mi_expected, name="count")\n\n tm.assert_series_equal(result, expected)\n\n\ndef seed_df(seed_nans, n, m):\n days = date_range("2015-08-24", periods=10)\n\n frame = DataFrame(\n {\n "1st": np.random.default_rng(2).choice(list("abcd"), n),\n "2nd": np.random.default_rng(2).choice(days, n),\n "3rd": np.random.default_rng(2).integers(1, m + 1, n),\n }\n )\n\n if seed_nans:\n # Explicitly cast to float to avoid implicit cast when setting nan\n frame["3rd"] = frame["3rd"].astype("float")\n frame.loc[1::11, "1st"] = np.nan\n frame.loc[3::17, "2nd"] = np.nan\n frame.loc[7::19, "3rd"] = np.nan\n frame.loc[8::19, "3rd"] = np.nan\n frame.loc[9::19, "3rd"] = np.nan\n\n return frame\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize("seed_nans", [True, False])\n@pytest.mark.parametrize("num_rows", [10, 50])\n@pytest.mark.parametrize("max_int", [5, 20])\n@pytest.mark.parametrize("keys", ["1st", "2nd", ["1st", "2nd"]], ids=repr)\n@pytest.mark.parametrize("bins", [None, [0, 5]], 
ids=repr)\n@pytest.mark.parametrize("isort", [True, False])\n@pytest.mark.parametrize("normalize, name", [(True, "proportion"), (False, "count")])\n@pytest.mark.parametrize("sort", [True, False])\n@pytest.mark.parametrize("ascending", [True, False])\n@pytest.mark.parametrize("dropna", [True, False])\ndef test_series_groupby_value_counts(\n seed_nans,\n num_rows,\n max_int,\n keys,\n bins,\n isort,\n normalize,\n name,\n sort,\n ascending,\n dropna,\n):\n df = seed_df(seed_nans, num_rows, max_int)\n\n def rebuild_index(df):\n arr = list(map(df.index.get_level_values, range(df.index.nlevels)))\n df.index = MultiIndex.from_arrays(arr, names=df.index.names)\n return df\n\n kwargs = {\n "normalize": normalize,\n "sort": sort,\n "ascending": ascending,\n "dropna": dropna,\n "bins": bins,\n }\n\n gr = df.groupby(keys, sort=isort)\n left = gr["3rd"].value_counts(**kwargs)\n\n gr = df.groupby(keys, sort=isort)\n right = gr["3rd"].apply(Series.value_counts, **kwargs)\n right.index.names = right.index.names[:-1] + ["3rd"]\n # https://github.com/pandas-dev/pandas/issues/49909\n right = right.rename(name)\n\n # have to sort on index because of unstable sort on values\n left, right = map(rebuild_index, (left, right)) # xref GH9212\n tm.assert_series_equal(left.sort_index(), right.sort_index())\n\n\n@pytest.mark.parametrize("utc", [True, False])\ndef test_series_groupby_value_counts_with_grouper(utc):\n # GH28479\n df = DataFrame(\n {\n "Timestamp": [\n 1565083561,\n 1565083561 + 86400,\n 1565083561 + 86500,\n 1565083561 + 86400 * 2,\n 1565083561 + 86400 * 3,\n 1565083561 + 86500 * 3,\n 1565083561 + 86400 * 4,\n ],\n "Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],\n }\n ).drop([3])\n\n df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s")\n dfg = df.groupby(Grouper(freq="1D", key="Datetime"))\n\n # have to sort on index because of unstable sort on values xref GH9212\n result = dfg["Food"].value_counts().sort_index()\n expected = 
dfg["Food"].apply(Series.value_counts).sort_index()\n expected.index.names = result.index.names\n # https://github.com/pandas-dev/pandas/issues/49909\n expected = expected.rename("count")\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])\ndef test_series_groupby_value_counts_empty(columns):\n # GH39172\n df = DataFrame(columns=columns)\n dfg = df.groupby(columns[:-1])\n\n result = dfg[columns[-1]].value_counts()\n expected = Series([], dtype=result.dtype, name="count")\n expected.index = MultiIndex.from_arrays([[]] * len(columns), names=columns)\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])\ndef test_series_groupby_value_counts_one_row(columns):\n # GH42618\n df = DataFrame(data=[range(len(columns))], columns=columns)\n dfg = df.groupby(columns[:-1])\n\n result = dfg[columns[-1]].value_counts()\n expected = df.value_counts()\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_groupby_value_counts_on_categorical():\n # GH38672\n\n s = Series(Categorical(["a"], categories=["a", "b"]))\n result = s.groupby([0]).value_counts()\n\n expected = Series(\n data=[1, 0],\n index=MultiIndex.from_arrays(\n [\n np.array([0, 0]),\n CategoricalIndex(\n ["a", "b"], categories=["a", "b"], ordered=False, dtype="category"\n ),\n ]\n ),\n name="count",\n )\n\n # Expected:\n # 0 a 1\n # b 0\n # dtype: int64\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_groupby_value_counts_no_sort():\n # GH#50482\n df = DataFrame(\n {\n "gender": ["male", "male", "female", "male", "female", "male"],\n "education": ["low", "medium", "high", "low", "high", "low"],\n "country": ["US", "FR", "US", "FR", "FR", "FR"],\n }\n )\n gb = df.groupby(["country", "gender"], sort=False)["education"]\n result = gb.value_counts(sort=False)\n index = MultiIndex(\n levels=[["US", "FR"], ["male", "female"], ["low", "medium", "high"]],\n codes=[[0, 1, 0, 
1, 1], [0, 0, 1, 0, 1], [0, 1, 2, 0, 2]],\n names=["country", "gender", "education"],\n )\n expected = Series([1, 1, 1, 2, 1], index=index, name="count")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.fixture\ndef education_df():\n return DataFrame(\n {\n "gender": ["male", "male", "female", "male", "female", "male"],\n "education": ["low", "medium", "high", "low", "high", "low"],\n "country": ["US", "FR", "US", "FR", "FR", "FR"],\n }\n )\n\n\ndef test_axis(education_df):\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gp = education_df.groupby("country", axis=1)\n with pytest.raises(NotImplementedError, match="axis"):\n gp.value_counts()\n\n\ndef test_bad_subset(education_df):\n gp = education_df.groupby("country")\n with pytest.raises(ValueError, match="subset"):\n gp.value_counts(subset=["country"])\n\n\ndef test_basic(education_df, request):\n # gh43564\n if Version(np.__version__) >= Version("1.25"):\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n result = education_df.groupby("country")[["gender", "education"]].value_counts(\n normalize=True\n )\n expected = Series(\n data=[0.5, 0.25, 0.25, 0.5, 0.5],\n index=MultiIndex.from_tuples(\n [\n ("FR", "male", "low"),\n ("FR", "female", "high"),\n ("FR", "male", "medium"),\n ("US", "female", "high"),\n ("US", "male", "low"),\n ],\n names=["country", "gender", "education"],\n ),\n name="proportion",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef _frame_value_counts(df, keys, normalize, sort, ascending):\n return df[keys].value_counts(normalize=normalize, sort=sort, ascending=ascending)\n\n\n@pytest.mark.parametrize("groupby", ["column", "array", "function"])\n@pytest.mark.parametrize("normalize, name", [(True, "proportion"), (False, "count")])\n@pytest.mark.parametrize(\n "sort, ascending",\n [\n 
(False, None),\n (True, True),\n (True, False),\n ],\n)\n@pytest.mark.parametrize("as_index", [True, False])\n@pytest.mark.parametrize("frame", [True, False])\ndef test_against_frame_and_seriesgroupby(\n education_df,\n groupby,\n normalize,\n name,\n sort,\n ascending,\n as_index,\n frame,\n request,\n using_infer_string,\n):\n # test all parameters:\n # - Use column, array or function as by= parameter\n # - Whether or not to normalize\n # - Whether or not to sort and how\n # - Whether or not to use the groupby as an index\n # - 3-way compare against:\n # - apply with :meth:`~DataFrame.value_counts`\n # - `~SeriesGroupBy.value_counts`\n if Version(np.__version__) >= Version("1.25") and frame and sort and normalize:\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n by = {\n "column": "country",\n "array": education_df["country"].values,\n "function": lambda x: education_df["country"][x] == "US",\n }[groupby]\n\n gp = education_df.groupby(by=by, as_index=as_index)\n result = gp[["gender", "education"]].value_counts(\n normalize=normalize, sort=sort, ascending=ascending\n )\n if frame:\n # compare against apply with DataFrame value_counts\n warn = FutureWarning if groupby == "column" else None\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(warn, match=msg):\n expected = gp.apply(\n _frame_value_counts, ["gender", "education"], normalize, sort, ascending\n )\n\n if as_index:\n tm.assert_series_equal(result, expected)\n else:\n name = "proportion" if normalize else "count"\n expected = expected.reset_index().rename({0: name}, axis=1)\n if groupby == "column":\n expected = expected.rename({"level_0": "country"}, axis=1)\n expected["country"] = np.where(expected["country"], "US", "FR")\n elif groupby == "function":\n expected["level_0"] = expected["level_0"] == 1\n else:\n 
expected["level_0"] = np.where(expected["level_0"], "US", "FR")\n tm.assert_frame_equal(result, expected)\n else:\n # compare against SeriesGroupBy value_counts\n education_df["both"] = education_df["gender"] + "-" + education_df["education"]\n expected = gp["both"].value_counts(\n normalize=normalize, sort=sort, ascending=ascending\n )\n expected.name = name\n if as_index:\n index_frame = expected.index.to_frame(index=False)\n index_frame["gender"] = index_frame["both"].str.split("-").str.get(0)\n index_frame["education"] = index_frame["both"].str.split("-").str.get(1)\n del index_frame["both"]\n index_frame2 = index_frame.rename({0: None}, axis=1)\n expected.index = MultiIndex.from_frame(index_frame2)\n\n if index_frame2.columns.isna()[0]:\n # with using_infer_string, the columns in index_frame as string\n # dtype, which makes the rename({0: None}) above use np.nan\n # instead of None, so we need to set None more explicitly.\n expected.index.names = [None] + expected.index.names[1:]\n tm.assert_series_equal(result, expected)\n else:\n expected.insert(1, "gender", expected["both"].str.split("-").str.get(0))\n expected.insert(2, "education", expected["both"].str.split("-").str.get(1))\n if using_infer_string:\n expected = expected.astype({"gender": "str", "education": "str"})\n del expected["both"]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("normalize", [True, False])\n@pytest.mark.parametrize(\n "sort, ascending, expected_rows, expected_count, expected_group_size",\n [\n (False, None, [0, 1, 2, 3, 4], [1, 1, 1, 2, 1], [1, 3, 1, 3, 1]),\n (True, False, [3, 0, 1, 2, 4], [2, 1, 1, 1, 1], [3, 1, 3, 1, 1]),\n (True, True, [0, 1, 2, 4, 3], [1, 1, 1, 1, 2], [1, 3, 1, 1, 3]),\n ],\n)\ndef test_compound(\n education_df,\n normalize,\n sort,\n ascending,\n expected_rows,\n expected_count,\n expected_group_size,\n any_string_dtype,\n using_infer_string,\n):\n dtype = any_string_dtype\n education_df = education_df.astype(dtype)\n 
education_df.columns = education_df.columns.astype(dtype)\n # Multiple groupby keys and as_index=False\n gp = education_df.groupby(["country", "gender"], as_index=False, sort=False)\n result = gp["education"].value_counts(\n normalize=normalize, sort=sort, ascending=ascending\n )\n expected = DataFrame()\n for column in ["country", "gender", "education"]:\n expected[column] = [education_df[column][row] for row in expected_rows]\n expected = expected.astype(dtype)\n expected.columns = expected.columns.astype(dtype)\n if normalize:\n expected["proportion"] = expected_count\n expected["proportion"] /= expected_group_size\n if dtype == "string[pyarrow]":\n # TODO(nullable) also string[python] should return nullable dtypes\n expected["proportion"] = expected["proportion"].convert_dtypes()\n else:\n expected["count"] = expected_count\n if dtype == "string[pyarrow]":\n expected["count"] = expected["count"].convert_dtypes()\n if using_infer_string and dtype == object:\n expected = expected.astype(\n {"country": "str", "gender": "str", "education": "str"}\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.fixture\ndef animals_df():\n return DataFrame(\n {"key": [1, 1, 1, 1], "num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},\n index=["falcon", "dog", "cat", "ant"],\n )\n\n\n@pytest.mark.parametrize(\n "sort, ascending, normalize, name, expected_data, expected_index",\n [\n (False, None, False, "count", [1, 2, 1], [(1, 1, 1), (2, 4, 6), (2, 0, 0)]),\n (True, True, False, "count", [1, 1, 2], [(1, 1, 1), (2, 6, 4), (2, 0, 0)]),\n (True, False, False, "count", [2, 1, 1], [(1, 1, 1), (4, 2, 6), (0, 2, 0)]),\n (\n True,\n False,\n True,\n "proportion",\n [0.5, 0.25, 0.25],\n [(1, 1, 1), (4, 2, 6), (0, 2, 0)],\n ),\n ],\n)\ndef test_data_frame_value_counts(\n animals_df, sort, ascending, normalize, name, expected_data, expected_index\n):\n # 3-way compare with :meth:`~DataFrame.value_counts`\n # Tests from frame/methods/test_value_counts.py\n result_frame = 
animals_df.value_counts(\n sort=sort, ascending=ascending, normalize=normalize\n )\n expected = Series(\n data=expected_data,\n index=MultiIndex.from_arrays(\n expected_index, names=["key", "num_legs", "num_wings"]\n ),\n name=name,\n )\n tm.assert_series_equal(result_frame, expected)\n\n result_frame_groupby = animals_df.groupby("key").value_counts(\n sort=sort, ascending=ascending, normalize=normalize\n )\n\n tm.assert_series_equal(result_frame_groupby, expected)\n\n\n@pytest.fixture\ndef nulls_df():\n n = np.nan\n return DataFrame(\n {\n "A": [1, 1, n, 4, n, 6, 6, 6, 6],\n "B": [1, 1, 3, n, n, 6, 6, 6, 6],\n "C": [1, 2, 3, 4, 5, 6, n, 8, n],\n "D": [1, 2, 3, 4, 5, 6, 7, n, n],\n }\n )\n\n\n@pytest.mark.parametrize(\n "group_dropna, count_dropna, expected_rows, expected_values",\n [\n (\n False,\n False,\n [0, 1, 3, 5, 7, 6, 8, 2, 4],\n [0.5, 0.5, 1.0, 0.25, 0.25, 0.25, 0.25, 1.0, 1.0],\n ),\n (False, True, [0, 1, 3, 5, 2, 4], [0.5, 0.5, 1.0, 1.0, 1.0, 1.0]),\n (True, False, [0, 1, 5, 7, 6, 8], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]),\n (True, True, [0, 1, 5], [0.5, 0.5, 1.0]),\n ],\n)\ndef test_dropna_combinations(\n nulls_df, group_dropna, count_dropna, expected_rows, expected_values, request\n):\n if Version(np.__version__) >= Version("1.25") and not group_dropna:\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n gp = nulls_df.groupby(["A", "B"], dropna=group_dropna)\n result = gp.value_counts(normalize=True, sort=True, dropna=count_dropna)\n columns = DataFrame()\n for column in nulls_df.columns:\n columns[column] = [nulls_df[column][row] for row in expected_rows]\n index = MultiIndex.from_frame(columns)\n expected = Series(data=expected_values, index=index, name="proportion")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.fixture\ndef names_with_nulls_df(nulls_fixture):\n return DataFrame(\n {\n "key": [1, 1, 1, 1],\n 
"first_name": ["John", "Anne", "John", "Beth"],\n "middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],\n },\n )\n\n\n@pytest.mark.parametrize(\n "dropna, expected_data, expected_index",\n [\n (\n True,\n [1, 1],\n MultiIndex.from_arrays(\n [(1, 1), ("Beth", "John"), ("Louise", "Smith")],\n names=["key", "first_name", "middle_name"],\n ),\n ),\n (\n False,\n [1, 1, 1, 1],\n MultiIndex(\n levels=[\n Index([1]),\n Index(["Anne", "Beth", "John"]),\n Index(["Louise", "Smith", np.nan]),\n ],\n codes=[[0, 0, 0, 0], [0, 1, 2, 2], [2, 0, 1, 2]],\n names=["key", "first_name", "middle_name"],\n ),\n ),\n ],\n)\n@pytest.mark.parametrize("normalize, name", [(False, "count"), (True, "proportion")])\ndef test_data_frame_value_counts_dropna(\n names_with_nulls_df, dropna, normalize, name, expected_data, expected_index\n):\n # GH 41334\n # 3-way compare with :meth:`~DataFrame.value_counts`\n # Tests with nulls from frame/methods/test_value_counts.py\n result_frame = names_with_nulls_df.value_counts(dropna=dropna, normalize=normalize)\n expected = Series(\n data=expected_data,\n index=expected_index,\n name=name,\n )\n if normalize:\n expected /= float(len(expected_data))\n\n tm.assert_series_equal(result_frame, expected)\n\n result_frame_groupby = names_with_nulls_df.groupby("key").value_counts(\n dropna=dropna, normalize=normalize\n )\n\n tm.assert_series_equal(result_frame_groupby, expected)\n\n\n@pytest.mark.parametrize("as_index", [False, True])\n@pytest.mark.parametrize("observed", [False, True])\n@pytest.mark.parametrize(\n "normalize, name, expected_data",\n [\n (\n False,\n "count",\n np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64),\n ),\n (\n True,\n "proportion",\n np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),\n ),\n ],\n)\ndef test_categorical_single_grouper_with_only_observed_categories(\n education_df, as_index, observed, normalize, name, expected_data, request\n):\n # Test single categorical grouper with only 
observed grouping categories\n # when non-groupers are also categorical\n if Version(np.__version__) >= Version("1.25"):\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n\n gp = education_df.astype("category").groupby(\n "country", as_index=as_index, observed=observed\n )\n result = gp.value_counts(normalize=normalize)\n\n expected_index = MultiIndex.from_tuples(\n [\n ("FR", "male", "low"),\n ("FR", "female", "high"),\n ("FR", "male", "medium"),\n ("FR", "female", "low"),\n ("FR", "female", "medium"),\n ("FR", "male", "high"),\n ("US", "female", "high"),\n ("US", "male", "low"),\n ("US", "female", "low"),\n ("US", "female", "medium"),\n ("US", "male", "high"),\n ("US", "male", "medium"),\n ],\n names=["country", "gender", "education"],\n )\n\n expected_series = Series(\n data=expected_data,\n index=expected_index,\n name=name,\n )\n for i in range(3):\n expected_series.index = expected_series.index.set_levels(\n CategoricalIndex(expected_series.index.levels[i]), level=i\n )\n\n if as_index:\n tm.assert_series_equal(result, expected_series)\n else:\n expected = expected_series.reset_index(\n name="proportion" if normalize else "count"\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef assert_categorical_single_grouper(\n education_df, as_index, observed, expected_index, normalize, name, expected_data\n):\n # Test single categorical grouper when non-groupers are also categorical\n education_df = education_df.copy().astype("category")\n\n # Add non-observed grouping categories\n education_df["country"] = education_df["country"].cat.add_categories(["ASIA"])\n\n gp = education_df.groupby("country", as_index=as_index, observed=observed)\n result = gp.value_counts(normalize=normalize)\n\n expected_series = Series(\n data=expected_data,\n index=MultiIndex.from_tuples(\n expected_index,\n names=["country", "gender", "education"],\n ),\n 
name=name,\n )\n for i in range(3):\n index_level = CategoricalIndex(expected_series.index.levels[i])\n if i == 0:\n index_level = index_level.set_categories(\n education_df["country"].cat.categories\n )\n expected_series.index = expected_series.index.set_levels(index_level, level=i)\n\n if as_index:\n tm.assert_series_equal(result, expected_series)\n else:\n expected = expected_series.reset_index(name=name)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("as_index", [True, False])\n@pytest.mark.parametrize(\n "normalize, name, expected_data",\n [\n (\n False,\n "count",\n np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64),\n ),\n (\n True,\n "proportion",\n np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),\n ),\n ],\n)\ndef test_categorical_single_grouper_observed_true(\n education_df, as_index, normalize, name, expected_data, request\n):\n # GH#46357\n\n if Version(np.__version__) >= Version("1.25"):\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n\n expected_index = [\n ("FR", "male", "low"),\n ("FR", "female", "high"),\n ("FR", "male", "medium"),\n ("FR", "female", "low"),\n ("FR", "female", "medium"),\n ("FR", "male", "high"),\n ("US", "female", "high"),\n ("US", "male", "low"),\n ("US", "female", "low"),\n ("US", "female", "medium"),\n ("US", "male", "high"),\n ("US", "male", "medium"),\n ]\n\n assert_categorical_single_grouper(\n education_df=education_df,\n as_index=as_index,\n observed=True,\n expected_index=expected_index,\n normalize=normalize,\n name=name,\n expected_data=expected_data,\n )\n\n\n@pytest.mark.parametrize("as_index", [True, False])\n@pytest.mark.parametrize(\n "normalize, name, expected_data",\n [\n (\n False,\n "count",\n np.array(\n [2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int64\n ),\n ),\n (\n True,\n "proportion",\n 
np.array(\n [\n 0.5,\n 0.25,\n 0.25,\n 0.0,\n 0.0,\n 0.0,\n 0.5,\n 0.5,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n ),\n ),\n ],\n)\ndef test_categorical_single_grouper_observed_false(\n education_df, as_index, normalize, name, expected_data, request\n):\n # GH#46357\n\n if Version(np.__version__) >= Version("1.25"):\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n\n expected_index = [\n ("FR", "male", "low"),\n ("FR", "female", "high"),\n ("FR", "male", "medium"),\n ("FR", "female", "low"),\n ("FR", "female", "medium"),\n ("FR", "male", "high"),\n ("US", "female", "high"),\n ("US", "male", "low"),\n ("US", "female", "low"),\n ("US", "female", "medium"),\n ("US", "male", "high"),\n ("US", "male", "medium"),\n ("ASIA", "female", "high"),\n ("ASIA", "female", "low"),\n ("ASIA", "female", "medium"),\n ("ASIA", "male", "high"),\n ("ASIA", "male", "low"),\n ("ASIA", "male", "medium"),\n ]\n\n assert_categorical_single_grouper(\n education_df=education_df,\n as_index=as_index,\n observed=False,\n expected_index=expected_index,\n normalize=normalize,\n name=name,\n expected_data=expected_data,\n )\n\n\n@pytest.mark.parametrize("as_index", [True, False])\n@pytest.mark.parametrize(\n "observed, expected_index",\n [\n (\n False,\n [\n ("FR", "high", "female"),\n ("FR", "high", "male"),\n ("FR", "low", "male"),\n ("FR", "low", "female"),\n ("FR", "medium", "male"),\n ("FR", "medium", "female"),\n ("US", "high", "female"),\n ("US", "high", "male"),\n ("US", "low", "male"),\n ("US", "low", "female"),\n ("US", "medium", "female"),\n ("US", "medium", "male"),\n ],\n ),\n (\n True,\n [\n ("FR", "high", "female"),\n ("FR", "low", "male"),\n ("FR", "medium", "male"),\n ("US", "high", "female"),\n ("US", "low", "male"),\n ],\n ),\n ],\n)\n@pytest.mark.parametrize(\n "normalize, name, expected_data",\n [\n (\n False,\n 
"count",\n np.array([1, 0, 2, 0, 1, 0, 1, 0, 1, 0, 0, 0], dtype=np.int64),\n ),\n (\n True,\n "proportion",\n # NaN values corresponds to non-observed groups\n np.array([1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]),\n ),\n ],\n)\ndef test_categorical_multiple_groupers(\n education_df, as_index, observed, expected_index, normalize, name, expected_data\n):\n # GH#46357\n\n # Test multiple categorical groupers when non-groupers are non-categorical\n education_df = education_df.copy()\n education_df["country"] = education_df["country"].astype("category")\n education_df["education"] = education_df["education"].astype("category")\n\n gp = education_df.groupby(\n ["country", "education"], as_index=as_index, observed=observed\n )\n result = gp.value_counts(normalize=normalize)\n\n expected_series = Series(\n data=expected_data[expected_data > 0.0] if observed else expected_data,\n index=MultiIndex.from_tuples(\n expected_index,\n names=["country", "education", "gender"],\n ),\n name=name,\n )\n for i in range(2):\n expected_series.index = expected_series.index.set_levels(\n CategoricalIndex(expected_series.index.levels[i]), level=i\n )\n\n if as_index:\n tm.assert_series_equal(result, expected_series)\n else:\n expected = expected_series.reset_index(\n name="proportion" if normalize else "count"\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("as_index", [False, True])\n@pytest.mark.parametrize("observed", [False, True])\n@pytest.mark.parametrize(\n "normalize, name, expected_data",\n [\n (\n False,\n "count",\n np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64),\n ),\n (\n True,\n "proportion",\n # NaN values corresponds to non-observed groups\n np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),\n ),\n ],\n)\ndef test_categorical_non_groupers(\n education_df, as_index, observed, normalize, name, expected_data, request\n):\n # GH#46357 Test non-observed categories are included in the result,\n # 
regardless of `observed`\n\n if Version(np.__version__) >= Version("1.25"):\n request.applymarker(\n pytest.mark.xfail(\n reason=(\n "pandas default unstable sorting of duplicates"\n "issue with numpy>=1.25 with AVX instructions"\n ),\n strict=False,\n )\n )\n\n education_df = education_df.copy()\n education_df["gender"] = education_df["gender"].astype("category")\n education_df["education"] = education_df["education"].astype("category")\n\n gp = education_df.groupby("country", as_index=as_index, observed=observed)\n result = gp.value_counts(normalize=normalize)\n\n expected_index = [\n ("FR", "male", "low"),\n ("FR", "female", "high"),\n ("FR", "male", "medium"),\n ("FR", "female", "low"),\n ("FR", "female", "medium"),\n ("FR", "male", "high"),\n ("US", "female", "high"),\n ("US", "male", "low"),\n ("US", "female", "low"),\n ("US", "female", "medium"),\n ("US", "male", "high"),\n ("US", "male", "medium"),\n ]\n expected_series = Series(\n data=expected_data,\n index=MultiIndex.from_tuples(\n expected_index,\n names=["country", "gender", "education"],\n ),\n name=name,\n )\n for i in range(1, 3):\n expected_series.index = expected_series.index.set_levels(\n CategoricalIndex(expected_series.index.levels[i]), level=i\n )\n\n if as_index:\n tm.assert_series_equal(result, expected_series)\n else:\n expected = expected_series.reset_index(\n name="proportion" if normalize else "count"\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "normalize, expected_label, expected_values",\n [\n (False, "count", [1, 1, 1]),\n (True, "proportion", [0.5, 0.5, 1.0]),\n ],\n)\ndef test_mixed_groupings(normalize, expected_label, expected_values):\n # Test multiple groupings\n df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})\n gp = df.groupby([[4, 5, 4], "A", lambda i: 7 if i == 1 else 8], as_index=False)\n result = gp.value_counts(sort=True, normalize=normalize)\n expected = DataFrame(\n {\n "level_0": np.array([4, 4, 5], dtype=int),\n "A": [1, 1, 2],\n 
"level_2": [8, 8, 7],\n "B": [1, 3, 2],\n expected_label: expected_values,\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "test, columns, expected_names",\n [\n ("repeat", list("abbde"), ["a", None, "d", "b", "b", "e"]),\n ("level", list("abcd") + ["level_1"], ["a", None, "d", "b", "c", "level_1"]),\n ],\n)\n@pytest.mark.parametrize("as_index", [False, True])\ndef test_column_label_duplicates(test, columns, expected_names, as_index):\n # GH 44992\n # Test for duplicate input column labels and generated duplicate labels\n df = DataFrame([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]], columns=columns)\n expected_data = [(1, 0, 7, 3, 5, 9), (2, 1, 8, 4, 6, 10)]\n keys = ["a", np.array([0, 1], dtype=np.int64), "d"]\n result = df.groupby(keys, as_index=as_index).value_counts()\n if as_index:\n expected = Series(\n data=(1, 1),\n index=MultiIndex.from_tuples(\n expected_data,\n names=expected_names,\n ),\n name="count",\n )\n tm.assert_series_equal(result, expected)\n else:\n expected_data = [list(row) + [1] for row in expected_data]\n expected_columns = list(expected_names)\n expected_columns[1] = "level_1"\n expected_columns.append("count")\n expected = DataFrame(expected_data, columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "normalize, expected_label",\n [\n (False, "count"),\n (True, "proportion"),\n ],\n)\ndef test_result_label_duplicates(normalize, expected_label):\n # Test for result column label duplicating an input column label\n gb = DataFrame([[1, 2, 3]], columns=["a", "b", expected_label]).groupby(\n "a", as_index=False\n )\n msg = f"Column label '{expected_label}' is duplicate of result column"\n with pytest.raises(ValueError, match=msg):\n gb.value_counts(normalize=normalize)\n\n\ndef test_ambiguous_grouping():\n # Test that groupby is not confused by groupings length equal to row count\n df = DataFrame({"a": [1, 1]})\n gb = df.groupby(np.array([1, 1], dtype=np.int64))\n result = 
gb.value_counts()\n expected = Series(\n [2], index=MultiIndex.from_tuples([[1, 1]], names=[None, "a"]), name="count"\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_subset_overlaps_gb_key_raises():\n # GH 46383\n df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])\n msg = "Keys {'c1'} in subset cannot be in the groupby column keys."\n with pytest.raises(ValueError, match=msg):\n df.groupby("c1").value_counts(subset=["c1"])\n\n\ndef test_subset_doesnt_exist_in_frame():\n # GH 46383\n df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])\n msg = "Keys {'c3'} in subset do not exist in the DataFrame."\n with pytest.raises(ValueError, match=msg):\n df.groupby("c1").value_counts(subset=["c3"])\n\n\ndef test_subset():\n # GH 46383\n df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])\n result = df.groupby(level=0).value_counts(subset=["c2"])\n expected = Series(\n [1, 2],\n index=MultiIndex.from_arrays([[0, 1], ["x", "y"]], names=[None, "c2"]),\n name="count",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_subset_duplicate_columns():\n # GH 46383\n df = DataFrame(\n [["a", "x", "x"], ["b", "y", "y"], ["b", "y", "y"]],\n index=[0, 1, 1],\n columns=["c1", "c2", "c2"],\n )\n result = df.groupby(level=0).value_counts(subset=["c2"])\n expected = Series(\n [1, 2],\n index=MultiIndex.from_arrays(\n [[0, 1], ["x", "y"], ["x", "y"]], names=[None, "c2", "c2"]\n ),\n name="count",\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("utc", [True, False])\ndef test_value_counts_time_grouper(utc, unit):\n # GH#50486\n df = DataFrame(\n {\n "Timestamp": [\n 1565083561,\n 1565083561 + 86400,\n 1565083561 + 86500,\n 1565083561 + 86400 * 2,\n 1565083561 + 86400 * 3,\n 1565083561 + 86500 * 3,\n 1565083561 + 86400 * 4,\n ],\n "Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],\n }\n ).drop([3])\n\n df["Datetime"] = 
to_datetime(df["Timestamp"], utc=utc, unit="s").dt.as_unit(unit)\n gb = df.groupby(Grouper(freq="1D", key="Datetime"))\n result = gb.value_counts()\n dates = to_datetime(\n ["2019-08-06", "2019-08-07", "2019-08-09", "2019-08-10"], utc=utc\n ).as_unit(unit)\n timestamps = df["Timestamp"].unique()\n index = MultiIndex(\n levels=[dates, timestamps, ["apple", "banana", "orange", "pear"]],\n codes=[[0, 1, 1, 2, 2, 3], range(6), [0, 0, 1, 2, 2, 3]],\n names=["Datetime", "Timestamp", "Food"],\n )\n expected = Series(1, index=index, name="count")\n tm.assert_series_equal(result, expected)\n\n\ndef test_value_counts_integer_columns():\n # GH#55627\n df = DataFrame({1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"]})\n gp = df.groupby([1, 2], as_index=False, sort=False)\n result = gp[3].value_counts()\n expected = DataFrame(\n {1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"], "count": 1}\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("vc_sort", [True, False])\n@pytest.mark.parametrize("normalize", [True, False])\ndef test_value_counts_sort(sort, vc_sort, normalize):\n # GH#55951\n df = DataFrame({"a": [2, 1, 1, 1], 0: [3, 4, 3, 3]})\n gb = df.groupby("a", sort=sort)\n result = gb.value_counts(sort=vc_sort, normalize=normalize)\n\n if normalize:\n values = [2 / 3, 1 / 3, 1.0]\n else:\n values = [2, 1, 1]\n index = MultiIndex(\n levels=[[1, 2], [3, 4]], codes=[[0, 0, 1], [0, 1, 0]], names=["a", 0]\n )\n expected = Series(values, index=index, name="proportion" if normalize else "count")\n if sort and vc_sort:\n taker = [0, 1, 2]\n elif sort and not vc_sort:\n taker = [0, 1, 2]\n elif not sort and vc_sort:\n taker = [0, 2, 1]\n else:\n taker = [2, 1, 0]\n expected = expected.take(taker)\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("vc_sort", [True, False])\n@pytest.mark.parametrize("normalize", [True, False])\ndef test_value_counts_sort_categorical(sort, vc_sort, normalize):\n # GH#55951\n df = 
DataFrame({"a": [2, 1, 1, 1], 0: [3, 4, 3, 3]}, dtype="category")\n gb = df.groupby("a", sort=sort, observed=True)\n result = gb.value_counts(sort=vc_sort, normalize=normalize)\n\n if normalize:\n values = [2 / 3, 1 / 3, 1.0, 0.0]\n else:\n values = [2, 1, 1, 0]\n name = "proportion" if normalize else "count"\n expected = DataFrame(\n {\n "a": Categorical([1, 1, 2, 2]),\n 0: Categorical([3, 4, 3, 4]),\n name: values,\n }\n ).set_index(["a", 0])[name]\n if sort and vc_sort:\n taker = [0, 1, 2, 3]\n elif sort and not vc_sort:\n taker = [0, 1, 2, 3]\n elif not sort and vc_sort:\n taker = [0, 2, 1, 3]\n else:\n taker = [2, 3, 0, 1]\n expected = expected.take(taker)\n\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\test_value_counts.py | test_value_counts.py | Python | 40,439 | 0.95 | 0.074045 | 0.055506 | awesome-app | 814 | 2023-09-07T13:44:46.496079 | MIT | true | 598c594199e80711b9f2552718dc8112 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_corrwith.cpython-313.pyc | test_corrwith.cpython-313.pyc | Other | 1,338 | 0.8 | 0 | 0 | react-lib | 610 | 2024-06-28T19:25:37.133681 | Apache-2.0 | true | 2b3c1599f2a49c36ebd5c46aa2c52999 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_describe.cpython-313.pyc | test_describe.cpython-313.pyc | Other | 15,542 | 0.8 | 0.005208 | 0.011111 | node-utils | 186 | 2023-10-12T03:24:01.295573 | Apache-2.0 | true | 0f1cc3b755d40b03ed185cd07d3d926a |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_groupby_shift_diff.cpython-313.pyc | test_groupby_shift_diff.cpython-313.pyc | Other | 12,352 | 0.8 | 0.012048 | 0.040816 | awesome-app | 60 | 2023-08-15T20:03:50.957864 | Apache-2.0 | true | d812b9e66aff3197861968f9cf2142e0 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_is_monotonic.cpython-313.pyc | test_is_monotonic.cpython-313.pyc | Other | 3,260 | 0.8 | 0 | 0 | awesome-app | 634 | 2024-05-10T22:24:35.683228 | BSD-3-Clause | true | 0937d80191624bbba428045ee7698dd6 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_nlargest_nsmallest.cpython-313.pyc | test_nlargest_nsmallest.cpython-313.pyc | Other | 5,416 | 0.8 | 0 | 0 | node-utils | 499 | 2024-11-16T01:30:33.572712 | MIT | true | 72072635fc7acfd42fd8b790efa43ba9 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_nth.cpython-313.pyc | test_nth.cpython-313.pyc | Other | 42,796 | 0.95 | 0.001773 | 0.02243 | python-kit | 722 | 2025-05-03T10:57:20.593852 | Apache-2.0 | true | 4f8833d6860cb083826322b46e6a06da |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_quantile.cpython-313.pyc | test_quantile.cpython-313.pyc | Other | 26,453 | 0.8 | 0.002841 | 0.048193 | node-utils | 423 | 2024-10-05T00:41:05.485771 | MIT | true | 74be9c6dd9fcb94698f1f1079e24da78 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_rank.cpython-313.pyc | test_rank.cpython-313.pyc | Other | 33,640 | 0.8 | 0 | 0.040712 | python-kit | 975 | 2024-07-24T00:25:59.921435 | BSD-3-Clause | true | dfd6550e32995edf81f0af88ab6b9671 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_sample.cpython-313.pyc | test_sample.cpython-313.pyc | Other | 9,008 | 0.8 | 0.005682 | 0.111801 | node-utils | 166 | 2023-09-29T23:13:38.147147 | GPL-3.0 | true | d574008361bd82f99f2bca93024a115a |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_size.cpython-313.pyc | test_size.cpython-313.pyc | Other | 7,743 | 0.8 | 0 | 0.012195 | awesome-app | 350 | 2024-11-08T10:15:17.780480 | MIT | true | ef87e0944e004d1b7a1cd223c38c4a5b |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_skew.cpython-313.pyc | test_skew.cpython-313.pyc | Other | 1,773 | 0.8 | 0 | 0 | awesome-app | 626 | 2024-12-15T07:12:37.402137 | GPL-3.0 | true | 2328b7d486d78e84cf5a430a04b35a16 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\test_value_counts.cpython-313.pyc | test_value_counts.cpython-313.pyc | Other | 44,725 | 0.95 | 0 | 0.015968 | python-kit | 191 | 2023-07-30T20:35:06.088127 | BSD-3-Clause | true | b548175553fbbb78c3b08e68e72b329d |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\methods\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 203 | 0.7 | 0 | 0 | vue-tools | 540 | 2023-09-29T23:16:04.186883 | MIT | true | 62f2c58b04d87f20c1bd0a7c477064a2 |
import numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_arm\nfrom pandas.errors import NumbaUtilError\n\nfrom pandas import (\n DataFrame,\n Series,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\npytestmark = [pytest.mark.single_cpu]\n\nnumba = pytest.importorskip("numba")\npytestmark.append(\n pytest.mark.skipif(\n Version(numba.__version__) == Version("0.61") and is_platform_arm(),\n reason=f"Segfaults on ARM platforms with numba {numba.__version__}",\n )\n)\n\n\ndef test_correct_function_signature():\n pytest.importorskip("numba")\n\n def incorrect_function(x):\n return x + 1\n\n data = DataFrame(\n {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},\n columns=["key", "data"],\n )\n with pytest.raises(NumbaUtilError, match="The first 2"):\n data.groupby("key").transform(incorrect_function, engine="numba")\n\n with pytest.raises(NumbaUtilError, match="The first 2"):\n data.groupby("key")["data"].transform(incorrect_function, engine="numba")\n\n\ndef test_check_nopython_kwargs():\n pytest.importorskip("numba")\n\n def incorrect_function(values, index):\n return values + 1\n\n data = DataFrame(\n {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},\n columns=["key", "data"],\n )\n with pytest.raises(NumbaUtilError, match="numba does not support"):\n data.groupby("key").transform(incorrect_function, engine="numba", a=1)\n\n with pytest.raises(NumbaUtilError, match="numba does not support"):\n data.groupby("key")["data"].transform(incorrect_function, engine="numba", a=1)\n\n\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\n@pytest.mark.parametrize("jit", [True, False])\n@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])\n@pytest.mark.parametrize("as_index", [True, False])\ndef test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):\n 
pytest.importorskip("numba")\n\n def func(values, index):\n return values + 1\n\n if jit:\n # Test accepted jitted functions\n import numba\n\n func = numba.jit(func)\n\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n grouped = data.groupby(0, as_index=as_index)\n if pandas_obj == "Series":\n grouped = grouped[1]\n\n result = grouped.transform(func, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.transform(lambda x: x + 1, engine="cython")\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\n@pytest.mark.parametrize("jit", [True, False])\n@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])\ndef test_cache(jit, pandas_obj, nogil, parallel, nopython):\n # Test that the functions are cached correctly if we switch functions\n pytest.importorskip("numba")\n\n def func_1(values, index):\n return values + 1\n\n def func_2(values, index):\n return values * 5\n\n if jit:\n import numba\n\n func_1 = numba.jit(func_1)\n func_2 = numba.jit(func_2)\n\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n grouped = data.groupby(0)\n if pandas_obj == "Series":\n grouped = grouped[1]\n\n result = grouped.transform(func_1, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.transform(lambda x: x + 1, engine="cython")\n tm.assert_equal(result, expected)\n\n result = grouped.transform(func_2, engine="numba", engine_kwargs=engine_kwargs)\n expected = grouped.transform(lambda x: x * 5, engine="cython")\n tm.assert_equal(result, expected)\n\n # Retest func_1 which should use the cache\n result = grouped.transform(func_1, engine="numba", engine_kwargs=engine_kwargs)\n 
expected = grouped.transform(lambda x: x + 1, engine="cython")\n tm.assert_equal(result, expected)\n\n\ndef test_use_global_config():\n pytest.importorskip("numba")\n\n def func_1(values, index):\n return values + 1\n\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n grouped = data.groupby(0)\n expected = grouped.transform(func_1, engine="numba")\n with option_context("compute.use_numba", True):\n result = grouped.transform(func_1, engine=None)\n tm.assert_frame_equal(expected, result)\n\n\n# TODO: Test more than just reductions (e.g. actually test transformations once we have\n@pytest.mark.parametrize(\n "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}]\n)\ndef test_string_cython_vs_numba(agg_func, numba_supported_reductions):\n pytest.importorskip("numba")\n agg_func, kwargs = numba_supported_reductions\n data = DataFrame(\n {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]\n )\n grouped = data.groupby(0)\n\n result = grouped.transform(agg_func, engine="numba", **kwargs)\n expected = grouped.transform(agg_func, engine="cython", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n result = grouped[1].transform(agg_func, engine="numba", **kwargs)\n expected = grouped[1].transform(agg_func, engine="cython", **kwargs)\n tm.assert_series_equal(result, expected)\n\n\ndef test_args_not_cached():\n # GH 41647\n pytest.importorskip("numba")\n\n def sum_last(values, index, n):\n return values[-n:].sum()\n\n df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})\n grouped_x = df.groupby("id")["x"]\n result = grouped_x.transform(sum_last, 1, engine="numba")\n expected = Series([1.0] * 4, name="x")\n tm.assert_series_equal(result, expected)\n\n result = grouped_x.transform(sum_last, 2, engine="numba")\n expected = Series([2.0] * 4, name="x")\n tm.assert_series_equal(result, expected)\n\n\ndef test_index_data_correctly_passed():\n # GH 43133\n 
pytest.importorskip("numba")\n\n def f(values, index):\n return index - 1\n\n df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3])\n result = df.groupby("group").transform(f, engine="numba")\n expected = DataFrame([-4.0, -3.0, -2.0], columns=["v"], index=[-1, -2, -3])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_engine_kwargs_not_cached():\n # If the user passes a different set of engine_kwargs don't return the same\n # jitted function\n pytest.importorskip("numba")\n nogil = True\n parallel = False\n nopython = True\n\n def func_kwargs(values, index):\n return nogil + parallel + nopython\n\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n df = DataFrame({"value": [0, 0, 0]})\n result = df.groupby(level=0).transform(\n func_kwargs, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame({"value": [2.0, 2.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n nogil = False\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n result = df.groupby(level=0).transform(\n func_kwargs, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame({"value": [1.0, 1.0, 1.0]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore")\ndef test_multiindex_one_key(nogil, parallel, nopython):\n pytest.importorskip("numba")\n\n def numba_func(values, index):\n return 1\n\n df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n result = df.groupby("A").transform(\n numba_func, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame([{"A": 1, "B": 2, "C": 1.0}]).set_index(["A", "B"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiindex_multi_key_not_supported(nogil, parallel, nopython):\n pytest.importorskip("numba")\n\n def numba_func(values, index):\n return 1\n\n df = DataFrame([{"A": 1, "B": 2, "C": 
3}]).set_index(["A", "B"])\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n with pytest.raises(NotImplementedError, match="more than 1 grouping labels"):\n df.groupby(["A", "B"]).transform(\n numba_func, engine="numba", engine_kwargs=engine_kwargs\n )\n\n\ndef test_multilabel_numba_vs_cython(numba_supported_reductions):\n pytest.importorskip("numba")\n reduction, kwargs = numba_supported_reductions\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n }\n )\n gb = df.groupby(["A", "B"])\n res_agg = gb.transform(reduction, engine="numba", **kwargs)\n expected_agg = gb.transform(reduction, engine="cython", **kwargs)\n tm.assert_frame_equal(res_agg, expected_agg)\n\n\ndef test_multilabel_udf_numba_vs_cython():\n pytest.importorskip("numba")\n df = DataFrame(\n {\n "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],\n "B": ["one", "one", "two", "three", "two", "two", "one", "three"],\n "C": np.random.default_rng(2).standard_normal(8),\n "D": np.random.default_rng(2).standard_normal(8),\n }\n )\n gb = df.groupby(["A", "B"])\n result = gb.transform(\n lambda values, index: (values - values.min()) / (values.max() - values.min()),\n engine="numba",\n )\n expected = gb.transform(\n lambda x: (x - x.min()) / (x.max() - x.min()), engine="cython"\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\transform\test_numba.py | test_numba.py | Python | 10,011 | 0.95 | 0.112245 | 0.04386 | react-lib | 432 | 2025-01-01T02:56:05.453542 | GPL-3.0 | true | 300bb045da914dd956103c7a35b01642 |
""" test with the .transform """\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.common import ensure_platform_int\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n concat,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.groupby import get_groupby_method_args\n\n\ndef assert_fp_equal(a, b):\n assert (np.abs(a - b) < 1e-12).all()\n\n\ndef test_transform():\n data = Series(np.arange(9) // 3, index=np.arange(9))\n\n index = np.arange(9)\n np.random.default_rng(2).shuffle(index)\n data = data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n transformed = grouped.transform(lambda x: x * x.sum())\n assert transformed[7] == 12\n\n # GH 8046\n # make sure that we preserve the input order\n\n df = DataFrame(\n np.arange(6, dtype="int64").reshape(3, 2), columns=["a", "b"], index=[0, 2, 1]\n )\n key = [0, 0, 1]\n expected = (\n df.sort_index()\n .groupby(key)\n .transform(lambda x: x - x.mean())\n .groupby(key)\n .mean()\n )\n result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(key).mean()\n tm.assert_frame_equal(result, expected)\n\n def demean(arr):\n return arr - arr.mean(axis=0)\n\n people = DataFrame(\n np.random.default_rng(2).standard_normal((5, 5)),\n columns=["a", "b", "c", "d", "e"],\n index=["Joe", "Steve", "Wes", "Jim", "Travis"],\n )\n key = ["one", "two", "one", "two", "one"]\n result = people.groupby(key).transform(demean).groupby(key).mean()\n expected = people.groupby(key, group_keys=False).apply(demean).groupby(key).mean()\n tm.assert_frame_equal(result, expected)\n\n # GH 8430\n df = DataFrame(\n np.random.default_rng(2).standard_normal((50, 4)),\n columns=Index(list("ABCD"), dtype=object),\n index=date_range("2000-01-01", periods=50, freq="B"),\n )\n g = df.groupby(pd.Grouper(freq="ME"))\n g.transform(lambda x: x - 1)\n\n # GH 9700\n df = DataFrame({"a": range(5, 10), "b": range(5)})\n msg = 
"using DataFrameGroupBy.max"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.groupby("a").transform(max)\n expected = DataFrame({"b": range(5)})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_fast():\n df = DataFrame(\n {\n "id": np.arange(100000) / 3,\n "val": np.random.default_rng(2).standard_normal(100000),\n }\n )\n\n grp = df.groupby("id")["val"]\n\n values = np.repeat(grp.mean().values, ensure_platform_int(grp.count().values))\n expected = Series(values, index=df.index, name="val")\n\n msg = "using SeriesGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = grp.transform(np.mean)\n tm.assert_series_equal(result, expected)\n\n result = grp.transform("mean")\n tm.assert_series_equal(result, expected)\n\n\ndef test_transform_fast2():\n # GH 12737\n df = DataFrame(\n {\n "grouping": [0, 1, 1, 3],\n "f": [1.1, 2.1, 3.1, 4.5],\n "d": date_range("2014-1-1", "2014-1-4"),\n "i": [1, 2, 3, 4],\n },\n columns=["grouping", "f", "i", "d"],\n )\n result = df.groupby("grouping").transform("first")\n\n dates = Index(\n [\n Timestamp("2014-1-1"),\n Timestamp("2014-1-2"),\n Timestamp("2014-1-2"),\n Timestamp("2014-1-4"),\n ],\n dtype="M8[ns]",\n )\n expected = DataFrame(\n {"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]},\n columns=["f", "i", "d"],\n )\n tm.assert_frame_equal(result, expected)\n\n # selection\n result = df.groupby("grouping")[["f", "i"]].transform("first")\n expected = expected[["f", "i"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_fast3():\n # dup columns\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"])\n result = df.groupby("g").transform("first")\n expected = df.drop("g", axis=1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_broadcast(tsframe, ts):\n grouped = ts.groupby(lambda x: x.month)\n msg = "using SeriesGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = 
grouped.transform(np.mean)\n\n tm.assert_index_equal(result.index, ts.index)\n for _, gp in grouped:\n assert_fp_equal(result.reindex(gp.index), gp.mean())\n\n grouped = tsframe.groupby(lambda x: x.month)\n msg = "using DataFrameGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = grouped.transform(np.mean)\n tm.assert_index_equal(result.index, tsframe.index)\n for _, gp in grouped:\n agged = gp.mean(axis=0)\n res = result.reindex(gp.index)\n for col in tsframe:\n assert_fp_equal(res[col], agged[col])\n\n # group columns\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouped = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)\n msg = "using DataFrameGroupBy.mean"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = grouped.transform(np.mean)\n tm.assert_index_equal(result.index, tsframe.index)\n tm.assert_index_equal(result.columns, tsframe.columns)\n for _, gp in grouped:\n agged = gp.mean(1)\n res = result.reindex(columns=gp.columns)\n for idx in gp.index:\n assert_fp_equal(res.xs(idx), agged[idx])\n\n\ndef test_transform_axis_1(request, transformation_func):\n # GH 36308\n\n df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])\n args = get_groupby_method_args(transformation_func, df)\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby([0, 0, 1], axis=1)\n warn = FutureWarning if transformation_func == "fillna" else None\n msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n result = gb.transform(transformation_func, *args)\n msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n expected = df.T.groupby([0, 0, 1]).transform(transformation_func, *args).T\n\n if transformation_func in ["diff", "shift"]:\n # Result contains nans, so transpose 
coerces to float\n expected["b"] = expected["b"].astype("int64")\n\n # cumcount returns Series; the rest are DataFrame\n tm.assert_equal(result, expected)\n\n\ndef test_transform_axis_1_reducer(request, reduction_func):\n # GH#45715\n if reduction_func in (\n "corrwith",\n "ngroup",\n "nth",\n ):\n marker = pytest.mark.xfail(reason="transform incorrectly fails - GH#45986")\n request.applymarker(marker)\n\n df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n gb = df.groupby([0, 0, 1], axis=1)\n\n result = gb.transform(reduction_func)\n expected = df.T.groupby([0, 0, 1]).transform(reduction_func).T\n tm.assert_equal(result, expected)\n\n\ndef test_transform_axis_ts(tsframe):\n # make sure that we are setting the axes\n # correctly when on axis=0 or 1\n # in the presence of a non-monotonic indexer\n # GH12713\n\n base = tsframe.iloc[0:5]\n r = len(base.index)\n c = len(base.columns)\n tso = DataFrame(\n np.random.default_rng(2).standard_normal((r, c)),\n index=base.index,\n columns=base.columns,\n dtype="float64",\n )\n # monotonic\n ts = tso\n grouped = ts.groupby(lambda x: x.weekday(), group_keys=False)\n result = ts - grouped.transform("mean")\n expected = grouped.apply(lambda x: x - x.mean(axis=0))\n tm.assert_frame_equal(result, expected)\n\n ts = ts.T\n msg = "DataFrame.groupby with axis=1 is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n grouped = ts.groupby(lambda x: x.weekday(), axis=1, group_keys=False)\n result = ts - grouped.transform("mean")\n expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)\n tm.assert_frame_equal(result, expected)\n\n # non-monotonic\n ts = tso.iloc[[1, 0] + list(range(2, len(base)))]\n grouped = ts.groupby(lambda x: x.weekday(), group_keys=False)\n result = ts - grouped.transform("mean")\n expected = grouped.apply(lambda x: x - x.mean(axis=0))\n 
    tm.assert_frame_equal(result, expected)

    ts = ts.T
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        grouped = ts.groupby(lambda x: x.weekday(), axis=1, group_keys=False)
    result = ts - grouped.transform("mean")
    expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
    tm.assert_frame_equal(result, expected)


def test_transform_dtype():
    # GH 9807
    # Check transform dtype output is preserved
    df = DataFrame([[1, 3], [2, 3]])
    result = df.groupby(1).transform("mean")
    expected = DataFrame([[1.5], [1.5]])
    tm.assert_frame_equal(result, expected)


def test_transform_bug():
    # GH 5712
    # transforming on a datetime column
    df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
    result = df.groupby("A")["B"].transform(lambda x: x.rank(ascending=False))
    expected = Series(np.arange(5, 0, step=-1), name="B", dtype="float64")
    tm.assert_series_equal(result, expected)


def test_transform_numeric_to_boolean():
    # GH 16875
    # inconsistency in transforming boolean values
    expected = Series([True, True], name="A")

    df = DataFrame({"A": [1.1, 2.2], "B": [1, 2]})
    result = df.groupby("B").A.transform(lambda x: True)
    tm.assert_series_equal(result, expected)

    df = DataFrame({"A": [1, 2], "B": [1, 2]})
    result = df.groupby("B").A.transform(lambda x: True)
    tm.assert_series_equal(result, expected)


def test_transform_datetime_to_timedelta():
    # GH 15429
    # transforming a datetime to timedelta
    df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
    expected = Series(
        Timestamp("20130101") - Timestamp("20130101"), index=range(5), name="A"
    )

    # this does date math without changing result type in transform
    base_time = df["A"][0]
    result = (
        df.groupby("A")["A"].transform(lambda x: x.max() - x.min() + base_time)
        - base_time
    )
    tm.assert_series_equal(result, expected)

    # this does date math and causes the transform to return timedelta
    result = df.groupby("A")["A"].transform(lambda x: x.max() - x.min())
    tm.assert_series_equal(result, expected)


def test_transform_datetime_to_numeric():
    # GH 10972
    # convert dt to float
    df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")})
    result = df.groupby("a").b.transform(
        lambda x: x.dt.dayofweek - x.dt.dayofweek.mean()
    )

    expected = Series([-0.5, 0.5], name="b")
    tm.assert_series_equal(result, expected)

    # convert dt to int
    df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")})
    result = df.groupby("a").b.transform(
        lambda x: x.dt.dayofweek - x.dt.dayofweek.min()
    )

    expected = Series([0, 1], dtype=np.int32, name="b")
    tm.assert_series_equal(result, expected)


def test_transform_casting():
    # 13046
    # diff() on a grouped datetime column must come back as timedelta ("m")
    times = [
        "13:43:27",
        "14:26:19",
        "14:29:01",
        "18:39:34",
        "18:40:18",
        "18:44:30",
        "18:46:00",
        "18:52:15",
        "18:59:59",
        "19:17:48",
        "19:21:38",
    ]
    df = DataFrame(
        {
            "A": [f"B-{i}" for i in range(11)],
            "ID3": np.take(
                ["a", "b", "c", "d", "e"], [0, 1, 2, 1, 3, 1, 1, 1, 4, 1, 1]
            ),
            "DATETIME": pd.to_datetime([f"2014-10-08 {time}" for time in times]),
        },
        index=pd.RangeIndex(11, name="idx"),
    )

    result = df.groupby("ID3")["DATETIME"].transform(lambda x: x.diff())
    assert lib.is_np_dtype(result.dtype, "m")

    result = df[["ID3", "DATETIME"]].groupby("ID3").transform(lambda x: x.diff())
    assert lib.is_np_dtype(result.DATETIME.dtype, "m")


def test_transform_multiple(ts):
    # smoke test: transform with multiple group keys (year, month)
    grouped = ts.groupby([lambda x: x.year, lambda x: x.month])

    grouped.transform(lambda x: x * 2)

    msg = "using SeriesGroupBy.mean"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        grouped.transform(np.mean)


def test_dispatch_transform(tsframe):
    # fillna dispatched through the groupby should match an explicit transform
    df = tsframe[::5].reindex(tsframe.index)

    grouped = df.groupby(lambda x: x.month)

    msg = "DataFrameGroupBy.fillna is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        filled = grouped.fillna(method="pad")
    msg = "Series.fillna with 'method' is deprecated"
    fillit = lambda x: x.fillna(method="pad")
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = df.groupby(lambda x: x.month).transform(fillit)
    tm.assert_frame_equal(filled, expected)


def test_transform_fillna_null():
    # fillna without a value/method must raise both via transform and directly
    df = DataFrame(
        {
            "price": [10, 10, 20, 20, 30, 30],
            "color": [10, 10, 20, 20, 30, 30],
            "cost": (100, 200, 300, 400, 500, 600),
        }
    )
    msg = "DataFrameGroupBy.fillna is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        with pytest.raises(ValueError, match="Must specify a fill 'value' or 'method'"):
            df.groupby(["price"]).transform("fillna")
    with tm.assert_produces_warning(FutureWarning, match=msg):
        with pytest.raises(ValueError, match="Must specify a fill 'value' or 'method'"):
            df.groupby(["price"]).fillna()


def test_transform_transformation_func(transformation_func):
    # GH 30918
    # transform(name) must agree with applying the named method per group
    df = DataFrame(
        {
            "A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"],
            "B": [1, 2, np.nan, 3, 3, np.nan, 4],
        },
        index=date_range("2020-01-01", "2020-01-07"),
    )
    if transformation_func == "cumcount":
        test_op = lambda x: x.transform("cumcount")
        mock_op = lambda x: Series(range(len(x)), x.index)
    elif transformation_func == "fillna":
        test_op = lambda x: x.transform("fillna", value=0)
        mock_op = lambda x: x.fillna(value=0)
    elif transformation_func == "ngroup":
        test_op = lambda x: x.transform("ngroup")
        counter = -1

        def mock_op(x):
            nonlocal counter
            counter += 1
            return Series(counter, index=x.index)

    else:
        test_op = lambda x: x.transform(transformation_func)
        mock_op = lambda x: getattr(x, transformation_func)()

    if transformation_func == "pct_change":
        msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated"
        groupby_msg = (
            "The default fill_method='ffill' in DataFrameGroupBy.pct_change "
            "is deprecated"
        )
        warn = FutureWarning
        groupby_warn = FutureWarning
    elif transformation_func == "fillna":
        msg = ""
        groupby_msg = "DataFrameGroupBy.fillna is deprecated"
        warn = None
        groupby_warn = FutureWarning
    else:
        msg = groupby_msg = ""
        warn = groupby_warn = None

    with tm.assert_produces_warning(groupby_warn, match=groupby_msg):
        result = test_op(df.groupby("A"))

    # pass the group in same order as iterating `for ... in df.groupby(...)`
    # but reorder to match df's index since this is a transform
    groups = [df[["B"]].iloc[4:6], df[["B"]].iloc[6:], df[["B"]].iloc[:4]]
    with tm.assert_produces_warning(warn, match=msg):
        expected = concat([mock_op(g) for g in groups]).sort_index()
    # sort_index does not preserve the freq
    expected = expected.set_axis(df.index)

    if transformation_func in ("cumcount", "ngroup"):
        tm.assert_series_equal(result, expected)
    else:
        tm.assert_frame_equal(result, expected)


def test_transform_select_columns(df):
    # transform on a column subset should match selecting first, then grouping
    f = lambda x: x.mean()
    result = df.groupby("A")[["C", "D"]].transform(f)

    selection = df[["C", "D"]]
    expected = selection.groupby(df["A"]).transform(f)

    tm.assert_frame_equal(result, expected)


def test_transform_nuisance_raises(df, using_infer_string):
    # case that goes through _transform_item_by_item

    df.columns = ["A", "B", "B", "D"]

    # this also tests orderings in transform between
    # series/frame to make sure it's consistent
    grouped = df.groupby("A")

    gbc = grouped["B"]
    msg = "Could not convert"
    if using_infer_string:
        msg = "Cannot perform reduction 'mean' with string dtype"
    with pytest.raises(TypeError, match=msg):
        gbc.transform(lambda x: np.mean(x))

    with pytest.raises(TypeError, match=msg):
        df.groupby("A").transform(lambda x: np.mean(x))


def test_transform_function_aliases(df):
    # string aliases ("mean") must match their numpy-callable counterparts
    result = df.groupby("A").transform("mean", numeric_only=True)
    msg = "using DataFrameGroupBy.mean"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = df.groupby("A")[["C", "D"]].transform(np.mean)
    tm.assert_frame_equal(result, expected)

    result = df.groupby("A")["C"].transform("mean")
    msg = "using SeriesGroupBy.mean"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = df.groupby("A")["C"].transform(np.mean)
    tm.assert_series_equal(result, expected)


def test_series_fast_transform_date():
    # GH 13191
    # NaN group key: untouched rows come back as NaT
    df = DataFrame(
        {"grouping": [np.nan, 1, 1, 3], "d": date_range("2014-1-1", "2014-1-4")}
    )
    result = df.groupby("grouping")["d"].transform("first")
    dates = [
        pd.NaT,
        Timestamp("2014-1-2"),
        Timestamp("2014-1-2"),
        Timestamp("2014-1-4"),
    ]
    expected = Series(dates, name="d", dtype="M8[ns]")
    tm.assert_series_equal(result, expected)


def test_transform_length():
    # GH 9697
    # output length must equal input length regardless of sum variant used
    df = DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]})
    expected = Series([3.0] * 4)

    def nsum(x):
        return np.nansum(x)

    msg = "using DataFrameGroupBy.sum"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        results = [
            df.groupby("col1").transform(sum)["col2"],
            df.groupby("col1")["col2"].transform(sum),
            df.groupby("col1").transform(nsum)["col2"],
            df.groupby("col1")["col2"].transform(nsum),
        ]
    for result in results:
        tm.assert_series_equal(result, expected, check_names=False)


def test_transform_coercion():
    # 14457
    # when we are transforming be sure to not coerce
    # via assignment
    df = DataFrame({"A": ["a", "a", "b", "b"], "B": [0, 1, 3, 4]})
    g = df.groupby("A")

    msg = "using DataFrameGroupBy.mean"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = g.transform(np.mean)

    result = g.transform(lambda x: np.mean(x, axis=0))
    tm.assert_frame_equal(result, expected)


def test_groupby_transform_with_int(using_infer_string):
    # GH 3740, make sure that we might upcast on item-by-item transform

    # floats
    df = DataFrame(
        {
            "A": [1, 1, 1, 2, 2, 2],
            "B": Series(1, dtype="float64"),
            "C": Series([1, 2, 3, 1, 2, 3], dtype="float64"),
            "D": "foo",
        }
    )
    with np.errstate(all="ignore"):
        result = df.groupby("A")[["B", "C"]].transform(
            lambda x: (x - x.mean()) / x.std()
        )
    expected = DataFrame(
        {"B": np.nan, "C": Series([-1, 0, 1, -1, 0, 1], dtype="float64")}
    )
    tm.assert_frame_equal(result, expected)

    # int case
    df = DataFrame(
        {
            "A": [1, 1, 1, 2, 2, 2],
            "B": 1,
            "C": [1, 2, 3, 1, 2, 3],
            "D": "foo",
        }
    )
    msg = "Could not convert"
    if using_infer_string:
        msg = "Cannot perform reduction 'mean' with string dtype"
    with np.errstate(all="ignore"):
        with pytest.raises(TypeError, match=msg):
            df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
        result = df.groupby("A")[["B", "C"]].transform(
            lambda x: (x - x.mean()) / x.std()
        )
    expected = DataFrame({"B": np.nan, "C": [-1.0, 0.0, 1.0, -1.0, 0.0, 1.0]})
    tm.assert_frame_equal(result, expected)

    # int that needs float conversion
    s = Series([2, 3, 4, 10, 5, -1])
    df = DataFrame({"A": [1, 1, 1, 2, 2, 2], "B": 1, "C": s, "D": "foo"})
    with np.errstate(all="ignore"):
        with pytest.raises(TypeError, match=msg):
            df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
        result = df.groupby("A")[["B", "C"]].transform(
            lambda x: (x - x.mean()) / x.std()
        )

    s1 = s.iloc[0:3]
    s1 = (s1 - s1.mean()) / s1.std()
    s2 = s.iloc[3:6]
    s2 = (s2 - s2.mean()) / s2.std()
    expected = DataFrame({"B": np.nan, "C": concat([s1, s2])})
    tm.assert_frame_equal(result, expected)

    # int doesn't get downcasted
    result = df.groupby("A")[["B", "C"]].transform(lambda x: x * 2 / 2)
    expected = DataFrame({"B": 1.0, "C": [2.0, 3.0, 4.0, 10.0, 5.0, -1.0]})
    tm.assert_frame_equal(result, expected)


def test_groupby_transform_with_nan_group():
    # GH 9941
    # rows whose group key is NaN produce NaN in the transform output
    df = DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
    msg = "using SeriesGroupBy.max"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(df.b)["a"].transform(max)
    expected = Series([1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a")
    tm.assert_series_equal(result, expected)


def test_transform_mixed_type():
    # apply with a function that mutates its group and truncates to one row
    index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
    df = DataFrame(
        {
            "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
            "c": np.tile(["a", "b", "c"], 2),
            "v": np.arange(1.0, 7.0),
        },
        index=index,
    )

    def f(group):
        group["g"] = group["d"] * 2
        return group[:1]

    grouped = df.groupby("c")
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.apply(f)

    assert result["d"].dtype == np.float64

    # this is by definition a mutating operation!
    with pd.option_context("mode.chained_assignment", None):
        for key, group in grouped:
            res = f(group)
            tm.assert_frame_equal(res, result.loc[key])


@pytest.mark.parametrize(
    "op, args, targop",
    [
        ("cumprod", (), lambda x: x.cumprod()),
        ("cumsum", (), lambda x: x.cumsum()),
        ("shift", (-1,), lambda x: x.shift(-1)),
        ("shift", (1,), lambda x: x.shift()),
    ],
)
def test_cython_transform_series(op, args, targop):
    # GH 4095
    # cython fast path must agree with the generic transform path
    s = Series(np.random.default_rng(2).standard_normal(1000))
    s_missing = s.copy()
    s_missing.iloc[2:10] = np.nan
    labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)

    # series
    for data in [s, s_missing]:
        # print(data.head())
        expected = data.groupby(labels).transform(targop)

        tm.assert_series_equal(expected, data.groupby(labels).transform(op, *args))
        tm.assert_series_equal(expected, getattr(data.groupby(labels), op)(*args))


@pytest.mark.parametrize("op", ["cumprod", "cumsum"])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize(
    "input, exp",
    [
        # When everything is NaN
        ({"key": ["b"] * 10, "value": np.nan}, Series([np.nan] * 10, name="value")),
        # When there is a single NaN
        (
            {"key": ["b"] * 10 + ["a"] * 2, "value": [3] * 3 + [np.nan] + [3] * 8},
            {
                ("cumprod", False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
                ("cumprod", True): [
                    3.0,
                    9.0,
                    27.0,
                    np.nan,
                    81.0,
                    243.0,
                    729.0,
                    2187.0,
                    6561.0,
                    19683.0,
                    3.0,
                    9.0,
                ],
                ("cumsum", False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
                ("cumsum", True): [
                    3.0,
                    6.0,
                    9.0,
                    np.nan,
                    12.0,
                    15.0,
                    18.0,
                    21.0,
                    24.0,
                    27.0,
                    3.0,
                    6.0,
                ],
            },
        ),
    ],
)
def test_groupby_cum_skipna(op, skipna, input, exp):
    # skipna=False propagates the first NaN through the rest of the group
    df = DataFrame(input)
    result = df.groupby("key")["value"].transform(op, skipna=skipna)
    if isinstance(exp, dict):
        expected = exp[(op, skipna)]
    else:
        expected = exp
    expected = Series(expected, name="value")
    tm.assert_series_equal(expected, result)


@pytest.fixture
def frame():
    # Mixed-dtype frame (floats, ints, datetimes, strings, categorical)
    # shared by the cython transform tests below.
    floating = Series(np.random.default_rng(2).standard_normal(10))
    floating_missing = floating.copy()
    floating_missing.iloc[2:7] = np.nan
    strings = list("abcde") * 2
    strings_missing = strings[:]
    strings_missing[5] = np.nan

    df = DataFrame(
        {
            "float": floating,
            "float_missing": floating_missing,
            "int": [1, 1, 1, 1, 2] * 2,
            "datetime": date_range("1990-1-1", periods=10),
            "timedelta": pd.timedelta_range(1, freq="s", periods=10),
            "string": strings,
            "string_missing": strings_missing,
            "cat": Categorical(strings),
        },
    )
    return df


@pytest.fixture
def frame_mi(frame):
    # Same frame, re-indexed with a MultiIndex for level-based grouping.
    frame.index = MultiIndex.from_product([range(5), range(2)])
    return frame


@pytest.mark.slow
@pytest.mark.parametrize(
    "op, args, targop",
    [
        ("cumprod", (), lambda x: x.cumprod()),
        ("cumsum", (), lambda x: x.cumsum()),
        ("shift", (-1,), lambda x: x.shift(-1)),
        ("shift", (1,), lambda x: x.shift()),
    ],
)
@pytest.mark.parametrize("df_fix", ["frame", "frame_mi"])
@pytest.mark.parametrize(
    "gb_target",
    [
        {"by": np.random.default_rng(2).integers(0, 50, size=10).astype(float)},
        {"level": 0},
        {"by": "string"},
        pytest.param({"by": "string_missing"}, marks=pytest.mark.xfail),
        {"by": ["int", "string"]},
    ],
)
def test_cython_transform_frame(request, op, args, targop, df_fix, gb_target):
    # frame-level cython transforms must match a per-group apply of targop
    df = request.getfixturevalue(df_fix)
    gb = df.groupby(group_keys=False, **gb_target)

    if op != "shift" and "int" not in gb_target:
        # numeric apply fastpath promotes dtype so have
        # to apply separately and concat
        i = gb[["int"]].apply(targop)
        f = gb[["float", "float_missing"]].apply(targop)
        expected = concat([f, i], axis=1)
    else:
        if op != "shift" or not isinstance(gb_target.get("by"), (str, list)):
            warn = None
        else:
            warn = FutureWarning
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(warn, match=msg):
            expected = gb.apply(targop)

    expected = expected.sort_index(axis=1)
    if op == "shift":
        depr_msg = "The 'downcast' keyword in fillna is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=depr_msg):
            expected["string_missing"] = expected["string_missing"].fillna(
                np.nan, downcast=False
            )
            expected["string"] = expected["string"].fillna(np.nan, downcast=False)

    result = gb[expected.columns].transform(op, *args).sort_index(axis=1)
    tm.assert_frame_equal(result, expected)
    result = getattr(gb[expected.columns], op)(*args).sort_index(axis=1)
    tm.assert_frame_equal(result, expected)


@pytest.mark.slow
@pytest.mark.parametrize(
    "op, args, targop",
    [
        ("cumprod", (), lambda x: x.cumprod()),
        ("cumsum", (), lambda x: x.cumsum()),
        ("shift", (-1,), lambda x: x.shift(-1)),
        ("shift", (1,), lambda x: x.shift()),
    ],
)
@pytest.mark.parametrize("df_fix", ["frame", "frame_mi"])
@pytest.mark.parametrize(
    "gb_target",
    [
        {"by": np.random.default_rng(2).integers(0, 50, size=10).astype(float)},
        {"level": 0},
        {"by": "string"},
        # TODO: create xfail condition given other params
        # {"by": 'string_missing'},
        {"by": ["int", "string"]},
    ],
)
@pytest.mark.parametrize(
    "column",
    [
        "float",
        "float_missing",
        "int",
        "datetime",
        "timedelta",
        "string",
        "string_missing",
    ],
)
def test_cython_transform_frame_column(
    request, op, args, targop, df_fix, gb_target, column
):
    # per-column variant: unsupported dtype/op combinations must raise TypeError
    df = request.getfixturevalue(df_fix)
    gb = df.groupby(group_keys=False, **gb_target)
    c = column
    if (
        c not in ["float", "int", "float_missing"]
        and op != "shift"
        and not (c == "timedelta" and op == "cumsum")
    ):
        msg = "|".join(
            [
                "does not support .* operations",
                ".* is not supported for object dtype",
                "is not implemented for this dtype",
                ".* is not supported for str dtype",
                "dtype 'str' does not support operation '.*'",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            gb[c].transform(op)
        with pytest.raises(TypeError, match=msg):
            getattr(gb[c], op)()
    else:
        expected = gb[c].apply(targop)
        expected.name = c
        if c in ["string_missing", "string"]:
            depr_msg = "The 'downcast' keyword in fillna is deprecated"
            with tm.assert_produces_warning(FutureWarning, match=depr_msg):
                expected = expected.fillna(np.nan, downcast=False)

        res = gb[c].transform(op, *args)
        tm.assert_series_equal(expected, res)
        res2 = getattr(gb[c], op)(*args)
        tm.assert_series_equal(expected, res2)


def test_transform_with_non_scalar_group():
    # GH 10165
    # a transform returning a non-scalar per group must raise
    cols = MultiIndex.from_tuples(
        [
            ("syn", "A"),
            ("foo", "A"),
            ("non", "A"),
            ("syn", "C"),
            ("foo", "C"),
            ("non", "C"),
            ("syn", "T"),
            ("foo", "T"),
            ("non", "T"),
            ("syn", "G"),
            ("foo", "G"),
            ("non", "G"),
        ]
    )
    df = DataFrame(
        np.random.default_rng(2).integers(1, 10, (4, 12)),
        columns=cols,
        index=["A", "C", "G", "T"],
    )

    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby(axis=1, level=1)
    msg = "transform must return a scalar value for each group.*"
    with pytest.raises(ValueError, match=msg):
        gb.transform(lambda z: z.div(z.sum(axis=1), axis=0))


@pytest.mark.parametrize(
    "cols,expected",
    [
        ("a", Series([1, 1, 1], name="a")),
        (
            ["a", "c"],
            DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}),
        ),
    ],
)
@pytest.mark.parametrize("agg_func", ["count", "rank", "size"])
def test_transform_numeric_ret(cols, expected, agg_func):
    # GH#19200 and GH#27469
    df = DataFrame(
        {"a": date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)}
    )
    result = df.groupby("b")[cols].transform(agg_func)

    if agg_func == "rank":
        expected = expected.astype("float")
    elif agg_func == "size" and cols == ["a", "c"]:
        # transform("size") returns a Series
        expected = expected["a"].rename(None)
    tm.assert_equal(result, expected)


def test_transform_ffill():
    # GH 24211
    data = [["a", 0.0], ["a", float("nan")], ["b", 1.0], ["b", float("nan")]]
    df = DataFrame(data, columns=["key", "values"])
    result = df.groupby("key").transform("ffill")
    expected = DataFrame({"values": [0.0, 0.0, 1.0, 1.0]})
    tm.assert_frame_equal(result, expected)
    result = df.groupby("key")["values"].transform("ffill")
    expected = Series([0.0, 0.0, 1.0, 1.0], name="values")
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [("foo", "bar"), (1, 2), (1.0, 2.0)])
@pytest.mark.parametrize(
    "fill_method,limit,exp_vals",
    [
        (
            "ffill",
            None,
            [np.nan, np.nan, "val1", "val1", "val1", "val2", "val2", "val2"],
        ),
        ("ffill", 1, [np.nan, np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan]),
        (
            "bfill",
            None,
            ["val1", "val1", "val1", "val2", "val2", "val2", np.nan, np.nan],
        ),
        ("bfill", 1, [np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan, np.nan]),
    ],
)
def test_group_fill_methods(
    mix_groupings, as_series, val1, val2, fill_method, limit, exp_vals
):
    vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
    _exp_vals = list(exp_vals)
    # Overwrite placeholder values
    for index, exp_val in enumerate(_exp_vals):
        if exp_val == "val1":
            _exp_vals[index] = val1
        elif exp_val == "val2":
            _exp_vals[index] = val2

    # Need to modify values and expectations depending on the
    # Series / DataFrame that we ultimately want to generate
    if mix_groupings:  # ['a', 'b', 'a, 'b', ...]
        keys = ["a", "b"] * len(vals)

        def interweave(list_obj):
            # duplicate each element in place: [x, y] -> [x, x, y, y]
            temp = []
            for x in list_obj:
                temp.extend([x, x])

            return temp

        _exp_vals = interweave(_exp_vals)
        vals = interweave(vals)
    else:  # ['a', 'a', 'a', ... 'b', 'b', 'b']
        keys = ["a"] * len(vals) + ["b"] * len(vals)
        _exp_vals = _exp_vals * 2
        vals = vals * 2

    df = DataFrame({"key": keys, "val": vals})
    if as_series:
        result = getattr(df.groupby("key")["val"], fill_method)(limit=limit)
        exp = Series(_exp_vals, name="val")
        tm.assert_series_equal(result, exp)
    else:
        result = getattr(df.groupby("key"), fill_method)(limit=limit)
        exp = DataFrame({"val": _exp_vals})
        tm.assert_frame_equal(result, exp)


@pytest.mark.parametrize("fill_method", ["ffill", "bfill"])
def test_pad_stable_sorting(fill_method):
    # GH 21207
    x = [0] * 20
    y = [np.nan] * 10 + [1] * 10

    if fill_method == "bfill":
        y = y[::-1]

    df = DataFrame({"x": x, "y": y})
    expected = df.drop("x", axis=1)

    result = getattr(df.groupby("x"), fill_method)()

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "freq",
    [
        None,
        pytest.param(
            "D",
            marks=pytest.mark.xfail(
                reason="GH#23918 before method uses freq in vectorized approach"
            ),
        ),
    ],
)
@pytest.mark.parametrize("periods", [1, -1])
@pytest.mark.parametrize("fill_method", ["ffill", "bfill", None])
@pytest.mark.parametrize("limit", [None, 1])
def test_pct_change(frame_or_series, freq, periods, fill_method, limit):
    # GH 21200, 21621, 30463
    vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
    keys = ["a", "b"]
    key_v = np.repeat(keys, len(vals))
    df = DataFrame({"key": key_v, "vals": vals * 2})

    df_g = df
    if fill_method is not None:
        df_g = getattr(df.groupby("key"), fill_method)(limit=limit)
    grp = df_g.groupby(df.key)

    expected = grp["vals"].obj / grp["vals"].shift(periods) - 1

    gb = df.groupby("key")

    if frame_or_series is Series:
        gb = gb["vals"]
    else:
        expected = expected.to_frame("vals")

    msg = (
        "The 'fill_method' keyword being not None and the 'limit' keyword in "
        f"{type(gb).__name__}.pct_change are deprecated"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = gb.pct_change(
            periods=periods, fill_method=fill_method, limit=limit, freq=freq
        )
    tm.assert_equal(result, expected)


@pytest.mark.parametrize(
    "func, expected_status",
    [
        ("ffill", ["shrt", "shrt", "lng", np.nan, "shrt", "ntrl", "ntrl"]),
        ("bfill", ["shrt", "lng", "lng", "shrt", "shrt", "ntrl", np.nan]),
    ],
)
def test_ffill_bfill_non_unique_multilevel(func, expected_status):
    # GH 19437
    date = pd.to_datetime(
        [
            "2018-01-01",
            "2018-01-01",
            "2018-01-01",
            "2018-01-01",
            "2018-01-02",
            "2018-01-01",
            "2018-01-02",
        ]
    )
    symbol = ["MSFT", "MSFT", "MSFT", "AAPL", "AAPL", "TSLA", "TSLA"]
    status = ["shrt", np.nan, "lng", np.nan, "shrt", "ntrl", np.nan]

    df = DataFrame({"date": date, "symbol": symbol, "status": status})
    df = df.set_index(["date", "symbol"])
    result = getattr(df.groupby("symbol")["status"], func)()

    index = MultiIndex.from_tuples(
        tuples=list(zip(*[date, symbol])), names=["date", "symbol"]
    )
    expected = Series(expected_status, index=index, name="status")

    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
    # GH 20653
    df = DataFrame(
        [["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"]
    )

    exp = Series([True, np.nan, True], name="val")

    msg = "using SeriesGroupBy.[any|all]"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res = df.groupby("key")["val"].transform(func)
    tm.assert_series_equal(res, exp)


def test_groupby_transform_rename():
    # https://github.com/pandas-dev/pandas/issues/23461
    def demean_rename(x):
        result = x - x.mean()

        if isinstance(x, Series):
            return result

        result = result.rename(columns={c: f"{c}_demeaned" for c in result.columns})

        return result

    df = DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]})
    expected = DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]})

    result = df.groupby("group").transform(demean_rename)
    tm.assert_frame_equal(result, expected)
    result_single = df.groupby("group").value.transform(demean_rename)
    tm.assert_series_equal(result_single, expected["value"])


@pytest.mark.parametrize("func", [min, max, np.min, np.max, "first", "last"])
def test_groupby_transform_timezone_column(func):
    # GH 24198
    ts = pd.to_datetime("now", utc=True).tz_convert("Asia/Singapore")
    result = DataFrame({"end_time": [ts], "id": [1]})
    warn = FutureWarning if not isinstance(func, str) else None
    msg = "using SeriesGroupBy.[min|max]"
    with tm.assert_produces_warning(warn, match=msg):
        result["max_end_time"] = result.groupby("id").end_time.transform(func)
    expected = DataFrame([[ts, 1, ts]], columns=["end_time", "id", "max_end_time"])
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "func, values",
    [
        ("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]),
        ("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"]),
    ],
)
def test_groupby_transform_with_datetimes(func, values):
    # GH 15306
    dates = date_range("1/1/2011", periods=10, freq="D")

    stocks = DataFrame({"price": np.arange(10.0)}, index=dates)
    stocks["week_id"] = dates.isocalendar().week

    result = stocks.groupby(stocks["week_id"])["price"].transform(func)

    expected = Series(
        data=pd.to_datetime(values).as_unit("ns"), index=dates, name="price"
    )

    tm.assert_series_equal(result, expected)


def test_groupby_transform_dtype():
    # GH 22243
    df = DataFrame({"a": [1], "val": [1.35]})

    result = df["val"].transform(lambda x: x.map(lambda y: f"+{y}"))
    expected1 = Series(["+1.35"], name="val")
    tm.assert_series_equal(result, expected1)

    result = df.groupby("a")["val"].transform(lambda x: x.map(lambda y: f"+{y}"))
    tm.assert_series_equal(result, expected1)

    result = df.groupby("a")["val"].transform(lambda x: x.map(lambda y: f"+({y})"))
    expected2 = Series(["+(1.35)"], name="val")
    tm.assert_series_equal(result, expected2)

    df["val"] = df["val"].astype(object)
    result = df.groupby("a")["val"].transform(lambda x: x.map(lambda y: f"+{y}"))
    tm.assert_series_equal(result, expected1)


@pytest.mark.parametrize("func", ["cumsum", "cumprod", "cummin", "cummax"])
def test_transform_absent_categories(func):
    # GH 16771
    # cython transforms with more groups than rows
    x_vals = [1]
    x_cats = range(2)
    y = [1]
    df = DataFrame({"x": Categorical(x_vals, x_cats), "y": y})
    result = getattr(df.y.groupby(df.x, observed=False), func)()
    expected = df.y
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("func", ["ffill", "bfill", "shift"])
@pytest.mark.parametrize("key, val", [("level", 0), ("by", Series([0]))])
def test_ffill_not_in_axis(func, key, val):
    # GH 21521
    df = DataFrame([[np.nan]])
    result = getattr(df.groupby(**{key: val}), func)()
    expected = df

    tm.assert_frame_equal(result, expected)


def test_transform_invalid_name_raises():
    # GH#27486
    df = DataFrame({"a": [0, 1, 1, 2]})
    g = df.groupby(["a", "b", "b", "c"])
    with pytest.raises(ValueError, match="not a valid function name"):
        g.transform("some_arbitrary_name")

    # method exists on the object, but is not a valid transformation/agg
    assert hasattr(g, "aggregate")  # make sure the method exists
    with pytest.raises(ValueError, match="not a valid function name"):
        g.transform("aggregate")

    # Test SeriesGroupBy
    g = df["a"].groupby(["a", "b", "b", "c"])
    with pytest.raises(ValueError, match="not a valid function name"):
        g.transform("some_arbitrary_name")


def test_transform_agg_by_name(request, reduction_func, frame_or_series):
    # transform(<reducer name>) broadcasts the reduction back over the group
    func = reduction_func

    obj = DataFrame(
        {"a": [0, 0, 0, 1, 1, 1], "b": range(6)},
        index=["A", "B", "C", "D", "E", "F"],
    )
    if frame_or_series is Series:
        obj = obj["a"]

    g = obj.groupby(np.repeat([0, 1], 3))

    if func == "corrwith" and isinstance(obj, Series):  # GH#32293
        # TODO: implement SeriesGroupBy.corrwith
        assert not hasattr(g, func)
        return

    args = get_groupby_method_args(reduction_func, obj)
    result = g.transform(func, *args)

    # this is the *definition* of a transformation
    tm.assert_index_equal(result.index, obj.index)

    if func not in ("ngroup", "size") and obj.ndim == 2:
        # size/ngroup return a Series, unlike other transforms
        tm.assert_index_equal(result.columns, obj.columns)

    # verify that values were broadcasted across each group
    assert len(set(DataFrame(result).iloc[-3:, -1])) == 1


def test_transform_lambda_with_datetimetz():
    # GH 27496
    df = DataFrame(
        {
            "time": [
                Timestamp("2010-07-15 03:14:45"),
                Timestamp("2010-11-19 18:47:06"),
            ],
            "timezone": ["Etc/GMT+4", "US/Eastern"],
        }
    )
    result = df.groupby(["timezone"])["time"].transform(
        lambda x: x.dt.tz_localize(x.name)
    )
    expected = Series(
        [
            Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"),
            Timestamp("2010-11-19 18:47:06", tz="US/Eastern"),
        ],
        name="time",
    )
    tm.assert_series_equal(result, expected)


def test_transform_fastpath_raises():
    # GH#29631 case where fastpath defined in groupby.generic _choose_path
    # raises, but slow_path does not

    df = DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]})
    gb = df.groupby("A")

    def func(grp):
        # we want a function such that func(frame) fails but func.apply(frame)
        # works
        if grp.ndim == 2:
            # Ensure that fast_path fails
            raise NotImplementedError("Don't cross the streams")
        return grp * 2

    # Check that the fastpath raises, see _transform_general
    obj = gb._obj_with_exclusions
    gen = gb._grouper.get_iterator(obj, axis=gb.axis)
    fast_path, slow_path = gb._define_paths(func)
    _, group = next(gen)

    with pytest.raises(NotImplementedError, match="Don't cross the streams"):
        fast_path(group)

    result = gb.transform(func)

    expected = DataFrame([2, -2, 2, 4], columns=["B"])
    tm.assert_frame_equal(result, expected)


def test_transform_lambda_indexing():
    # GH 7883
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"],
            "B": ["one", "one", "two", "three", "two", "six", "five", "three"],
            "C": range(8),
            "D": range(8),
            "E": range(8),
        }
    )
    df = df.set_index(["A", "B"])
    df = df.sort_index()
    result = df.groupby(level="A").transform(lambda x: x.iloc[-1])
    expected = DataFrame(
        {
            "C": [3, 3, 7, 7, 4, 4, 4, 4],
            "D": [3, 3, 7, 7, 4, 4, 4, 4],
            "E": [3, 3, 7, 7, 4, 4, 4, 4],
        },
        index=MultiIndex.from_tuples(
            [
                ("bar", "one"),
                ("bar", "three"),
                ("flux", "six"),
                ("flux", "three"),
                ("foo", "five"),
                ("foo", "one"),
                ("foo", "two"),
                ("foo", "two"),
            ],
            names=["A", "B"],
        ),
    )
    tm.assert_frame_equal(result, expected)


def test_categorical_and_not_categorical_key(observed):
    # Checks that groupby-transform, when grouping by both a categorical
    # and a non-categorical key, doesn't try to expand the output to include
    # non-observed categories but instead matches the input shape.
    # GH 32494
    df_with_categorical = DataFrame(
        {
            "A": Categorical(["a", "b", "a"], categories=["a", "b", "c"]),
            "B": [1, 2, 3],
            "C": ["a", "b", "a"],
        }
    )
    df_without_categorical = DataFrame(
        {"A": ["a", "b", "a"], "B": [1, 2, 3], "C": ["a", "b", "a"]}
    )

    # DataFrame case
    result = df_with_categorical.groupby(["A", "C"], observed=observed).transform("sum")
    expected = df_without_categorical.groupby(["A", "C"]).transform("sum")
    tm.assert_frame_equal(result, expected)
    expected_explicit = DataFrame({"B": [4, 2, 4]})
    tm.assert_frame_equal(result, expected_explicit)

    # Series case
    result = df_with_categorical.groupby(["A", "C"], observed=observed)["B"].transform(
        "sum"
    )
    expected = df_without_categorical.groupby(["A", "C"])["B"].transform("sum")
    tm.assert_series_equal(result, expected)
    expected_explicit = Series([4, 2, 4], name="B")
    tm.assert_series_equal(result, expected_explicit)


def test_string_rank_grouping():
    # GH 19354
    df = DataFrame({"A": [1, 1, 2], "B": [1, 2, 3]})
    result = df.groupby("A").transform("rank")
    expected = DataFrame({"B": [1.0, 2.0, 1.0]})
    tm.assert_frame_equal(result, expected)


def test_transform_cumcount():
    # GH 27472
    # transform("cumcount") must agree with the direct cumcount() call
    df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
    grp = df.groupby(np.repeat([0, 1], 3))

    result = grp.cumcount()
    expected = Series([0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(result, expected)

    result = grp.transform("cumcount")
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("keys", [["A1"], ["A1", "A2"]])
def test_null_group_lambda_self(sort, dropna, keys):
    # GH 17093
    # identity transform: null-keyed rows become NaN only when dropna=True
    size = 50
    nulls1 = np.random.default_rng(2).choice([False, True], size)
    nulls2 = np.random.default_rng(2).choice([False, True], size)
    # Whether a group contains a null value or not
    nulls_grouper = nulls1 if len(keys) == 1 else nulls1 | nulls2

    a1 = np.random.default_rng(2).integers(0, 5, size=size).astype(float)
    a1[nulls1] = np.nan
    a2 = np.random.default_rng(2).integers(0, 5, size=size).astype(float)
    a2[nulls2] = np.nan
    values = np.random.default_rng(2).integers(0, 5, size=a1.shape)
    df = DataFrame({"A1": a1, "A2": a2, "B": values})

    expected_values = values
    if dropna and nulls_grouper.any():
        expected_values = expected_values.astype(float)
        expected_values[nulls_grouper] = np.nan
    expected = DataFrame(expected_values, columns=["B"])

    gb = df.groupby(keys, dropna=dropna, sort=sort)
    result = gb[["B"]].transform(lambda x: x)
    tm.assert_frame_equal(result, expected)


def test_null_group_str_reducer(request, dropna, reduction_func):
    # GH 17093
    if reduction_func == "corrwith":
        msg = "incorrectly raises"
        request.applymarker(pytest.mark.xfail(reason=msg))

    index = [1, 2, 3, 4]  # test transform preserves non-standard index
    df = DataFrame({"A": [1, 1, np.nan, np.nan], "B": [1, 2, 2, 3]}, index=index)
    gb = df.groupby("A", dropna=dropna)

    args = get_groupby_method_args(reduction_func, df)

    # Manually handle reducers that don't fit the generic pattern
    # Set expected with dropna=False, then replace if necessary
    if reduction_func == "first":
        expected = DataFrame({"B": [1, 1, 2, 2]}, index=index)
    elif reduction_func == "last":
        expected = DataFrame({"B": [2, 2, 3, 3]}, index=index)
    elif reduction_func == "nth":
        expected = DataFrame({"B": [1, 1, 2, 2]}, index=index)
    elif reduction_func == "size":
        expected = Series([2, 2, 2, 2], index=index)
    elif reduction_func == "corrwith":
        expected = DataFrame({"B": [1.0, 1.0, 1.0, 1.0]}, index=index)
    else:
        expected_gb = df.groupby("A", dropna=False)
        buffer = []
        for idx, group in expected_gb:
            res = getattr(group["B"], reduction_func)()
            buffer.append(Series(res, index=group.index))
        expected = concat(buffer).to_frame("B")
    if dropna:
        dtype = object if reduction_func in ("any", "all") else float
        expected = expected.astype(dtype)
        if expected.ndim == 2:
            expected.iloc[[2, 3], 0] = np.nan
        else:
            expected.iloc[[2, 3]] = np.nan

    result = gb.transform(reduction_func, *args)
    tm.assert_equal(result, expected)


def test_null_group_str_transformer(request, dropna, transformation_func):
    # GH 17093
    df = DataFrame({"A": [1, 1, np.nan], "B": [1, 2, 2]}, index=[1, 2, 3])
    args = get_groupby_method_args(transformation_func, df)
    gb = df.groupby("A", dropna=dropna)

    buffer = []
    for k, (idx, group) in enumerate(gb):
        if transformation_func == "cumcount":
            # DataFrame has no cumcount method
            res = DataFrame({"B": range(len(group))}, index=group.index)
        elif transformation_func == "ngroup":
            res = DataFrame(len(group) * [k], index=group.index, columns=["B"])
        else:
            res = getattr(group[["B"]], transformation_func)(*args)
        buffer.append(res)
    if dropna:
        dtype = object if transformation_func in ("any", "all") else None
        buffer.append(DataFrame([[np.nan]], index=[3], dtype=dtype, columns=["B"]))
    expected = concat(buffer)

    if transformation_func in ("cumcount", "ngroup"):
        # ngroup/cumcount always returns a Series as it counts the groups, not values
        expected = expected["B"].rename(None)

    if transformation_func == "pct_change" and not dropna:
        warn = FutureWarning
        msg = (
            "The default fill_method='ffill' in DataFrameGroupBy.pct_change "
            "is deprecated"
        )
    elif transformation_func == "fillna":
        warn = FutureWarning
        msg = "DataFrameGroupBy.fillna is deprecated"
    else:
        warn = None
        msg = ""
    with tm.assert_produces_warning(warn, match=msg):
        result = gb.transform(transformation_func, *args)

    tm.assert_equal(result, expected)


def test_null_group_str_reducer_series(request, dropna, reduction_func):
    # GH 17093
    index = [1, 2, 3, 4]  # test transform preserves non-standard index
    ser = Series([1, 2, 2, 3], index=index)
    gb = ser.groupby([1, 1, np.nan, np.nan], dropna=dropna)

    if reduction_func == "corrwith":
        # corrwith not implemented for SeriesGroupBy
        assert not hasattr(gb, reduction_func)
        return

    args = get_groupby_method_args(reduction_func, ser)

    # Manually handle reducers that don't fit the generic pattern
    # Set expected with dropna=False, then replace if necessary
    if reduction_func == "first":
        expected = Series([1, 1, 2, 2], index=index)
    elif reduction_func == "last":
        expected = Series([2, 2, 3, 3], index=index)
    elif reduction_func == "nth":
        expected = Series([1, 1, 2, 2], index=index)
    elif reduction_func 
== "size":\n expected = Series([2, 2, 2, 2], index=index)\n elif reduction_func == "corrwith":\n expected = Series([1, 1, 2, 2], index=index)\n else:\n expected_gb = ser.groupby([1, 1, np.nan, np.nan], dropna=False)\n buffer = []\n for idx, group in expected_gb:\n res = getattr(group, reduction_func)()\n buffer.append(Series(res, index=group.index))\n expected = concat(buffer)\n if dropna:\n dtype = object if reduction_func in ("any", "all") else float\n expected = expected.astype(dtype)\n expected.iloc[[2, 3]] = np.nan\n\n result = gb.transform(reduction_func, *args)\n tm.assert_series_equal(result, expected)\n\n\ndef test_null_group_str_transformer_series(dropna, transformation_func):\n # GH 17093\n ser = Series([1, 2, 2], index=[1, 2, 3])\n args = get_groupby_method_args(transformation_func, ser)\n gb = ser.groupby([1, 1, np.nan], dropna=dropna)\n\n buffer = []\n for k, (idx, group) in enumerate(gb):\n if transformation_func == "cumcount":\n # Series has no cumcount method\n res = Series(range(len(group)), index=group.index)\n elif transformation_func == "ngroup":\n res = Series(k, index=group.index)\n else:\n res = getattr(group, transformation_func)(*args)\n buffer.append(res)\n if dropna:\n dtype = object if transformation_func in ("any", "all") else None\n buffer.append(Series([np.nan], index=[3], dtype=dtype))\n expected = concat(buffer)\n\n warn = FutureWarning if transformation_func == "fillna" else None\n msg = "SeriesGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n result = gb.transform(transformation_func, *args)\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "func, expected_values",\n [\n (Series.sort_values, [5, 4, 3, 2, 1]),\n (lambda x: x.head(1), [5.0, np.nan, 3, 2, np.nan]),\n ],\n)\n@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])\n@pytest.mark.parametrize("keys_in_index", [True, False])\ndef test_transform_aligns(func, frame_or_series, expected_values, keys, keys_in_index):\n # 
GH#45648 - transform should align with the input's index\n df = DataFrame({"a1": [1, 1, 3, 2, 2], "b": [5, 4, 3, 2, 1]})\n if "a2" in keys:\n df["a2"] = df["a1"]\n if keys_in_index:\n df = df.set_index(keys, append=True)\n\n gb = df.groupby(keys)\n if frame_or_series is Series:\n gb = gb["b"]\n\n result = gb.transform(func)\n expected = DataFrame({"b": expected_values}, index=df.index)\n if frame_or_series is Series:\n expected = expected["b"]\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("keys", ["A", ["A", "B"]])\ndef test_as_index_no_change(keys, df, groupby_func):\n # GH#49834 - as_index should have no impact on DataFrameGroupBy.transform\n if keys == "A":\n # Column B is string dtype; will fail on some ops\n df = df.drop(columns="B")\n args = get_groupby_method_args(groupby_func, df)\n gb_as_index_true = df.groupby(keys, as_index=True)\n gb_as_index_false = df.groupby(keys, as_index=False)\n warn = FutureWarning if groupby_func == "fillna" else None\n msg = "DataFrameGroupBy.fillna is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n result = gb_as_index_true.transform(groupby_func, *args)\n with tm.assert_produces_warning(warn, match=msg):\n expected = gb_as_index_false.transform(groupby_func, *args)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("how", ["idxmax", "idxmin"])\n@pytest.mark.parametrize("numeric_only", [True, False])\ndef test_idxmin_idxmax_transform_args(how, skipna, numeric_only):\n # GH#55268 - ensure *args are passed through when calling transform\n df = DataFrame({"a": [1, 1, 1, 2], "b": [3.0, 4.0, np.nan, 6.0], "c": list("abcd")})\n gb = df.groupby("a")\n msg = f"'axis' keyword in DataFrameGroupBy.{how} is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = gb.transform(how, 0, skipna, numeric_only)\n warn = None if skipna else FutureWarning\n msg = f"The behavior of DataFrameGroupBy.{how} with .* any-NA and skipna=False"\n with 
tm.assert_produces_warning(warn, match=msg):\n expected = gb.transform(how, skipna=skipna, numeric_only=numeric_only)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\groupby\transform\test_transform.py | test_transform.py | Python | 57,512 | 0.75 | 0.091228 | 0.083101 | vue-tools | 550 | 2024-09-06T02:27:22.532231 | GPL-3.0 | true | 4355a8567a37c3d6efe416cfafc4ea58 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\transform\__pycache__\test_numba.cpython-313.pyc | test_numba.cpython-313.pyc | Other | 15,523 | 0.95 | 0 | 0.035714 | react-lib | 985 | 2025-01-30T09:51:11.244764 | MIT | true | b359896f0efd80aa98ec373cd1777081 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\transform\__pycache__\test_transform.cpython-313.pyc | test_transform.cpython-313.pyc | Other | 92,218 | 0.75 | 0.004337 | 0.026221 | awesome-app | 79 | 2025-01-21T10:22:23.000888 | BSD-3-Clause | true | c79ef63c5c2d49b3e0dcb99c25ff9903 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\transform\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 205 | 0.7 | 0 | 0 | vue-tools | 173 | 2023-12-27T11:14:48.632685 | GPL-3.0 | true | cd9bb4ae8fd14c456ae05502c7f1f6ce |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 6,553 | 0.95 | 0.088235 | 0 | python-kit | 590 | 2025-02-10T18:22:58.200975 | GPL-3.0 | true | fa897a3566cb09fbb06c399c12079eed |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_all_methods.cpython-313.pyc | test_all_methods.cpython-313.pyc | Other | 4,508 | 0.8 | 0.052632 | 0 | node-utils | 587 | 2025-02-21T20:21:23.107670 | MIT | true | 5a4bd221f807c642d525ac3dfdfade9d |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_api.cpython-313.pyc | test_api.cpython-313.pyc | Other | 7,114 | 0.8 | 0.033708 | 0.02381 | vue-tools | 29 | 2024-06-25T00:09:03.526778 | BSD-3-Clause | true | fa6ee7bb88bb2fd84fbc4ad3cf348349 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_apply.cpython-313.pyc | test_apply.cpython-313.pyc | Other | 89,837 | 0.75 | 0.000918 | 0.022814 | react-lib | 415 | 2023-11-02T15:13:37.787075 | GPL-3.0 | true | 48af50a985c48843fa587bec79681d84 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_apply_mutate.cpython-313.pyc | test_apply_mutate.cpython-313.pyc | Other | 8,058 | 0.95 | 0 | 0 | react-lib | 302 | 2025-05-18T04:47:50.066959 | GPL-3.0 | true | b96f85192b8afec62329827b5090d004 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_bin_groupby.cpython-313.pyc | test_bin_groupby.cpython-313.pyc | Other | 3,319 | 0.8 | 0 | 0.065217 | react-lib | 580 | 2024-03-27T08:39:37.555199 | GPL-3.0 | true | dc602bb98bc2bfe4e0b56286eb1bafd0 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_categorical.cpython-313.pyc | test_categorical.cpython-313.pyc | Other | 98,719 | 0.75 | 0.002939 | 0.016897 | awesome-app | 22 | 2023-12-18T23:46:02.933835 | GPL-3.0 | true | d7b23618f4baa7ea277cb3b56c91c833 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_counting.cpython-313.pyc | test_counting.cpython-313.pyc | Other | 27,025 | 0.95 | 0 | 0.020408 | react-lib | 259 | 2024-12-07T08:21:04.940043 | MIT | true | 9c7751980a04b04f8ab23a29799d4fe0 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_cumulative.cpython-313.pyc | test_cumulative.cpython-313.pyc | Other | 18,605 | 0.8 | 0.003745 | 0.059524 | vue-tools | 835 | 2024-08-10T02:21:01.884676 | BSD-3-Clause | true | 1fcf2bd8658751a17475f5d7160657ff |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_filters.cpython-313.pyc | test_filters.cpython-313.pyc | Other | 42,751 | 0.95 | 0.004926 | 0.035912 | vue-tools | 306 | 2024-05-05T23:57:27.476115 | BSD-3-Clause | true | d3a57546c9bad67db13a55bec2043699 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_groupby_dropna.cpython-313.pyc | test_groupby_dropna.cpython-313.pyc | Other | 31,115 | 0.8 | 0 | 0.002725 | python-kit | 150 | 2025-01-29T04:08:10.418853 | Apache-2.0 | true | cac8cc700fd6563363e688b4cce78da8 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_groupby_subclass.cpython-313.pyc | test_groupby_subclass.cpython-313.pyc | Other | 6,996 | 0.8 | 0.010101 | 0.042553 | node-utils | 457 | 2025-03-19T03:04:55.907652 | BSD-3-Clause | true | a22f765df498a67377ddd02271c5b2f8 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_grouping.cpython-313.pyc | test_grouping.cpython-313.pyc | Other | 72,798 | 0.75 | 0.003322 | 0.009101 | python-kit | 193 | 2025-03-21T03:13:15.500051 | BSD-3-Clause | true | 1c5181bddbaa0050756fa6474488e488 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_indexing.cpython-313.pyc | test_indexing.cpython-313.pyc | Other | 15,333 | 0.8 | 0 | 0.009217 | python-kit | 964 | 2023-08-05T23:07:13.476924 | GPL-3.0 | true | 3ac4d5921d94046f063709bb55fbff23 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_index_as_string.cpython-313.pyc | test_index_as_string.cpython-313.pyc | Other | 3,466 | 0.8 | 0 | 0.071429 | vue-tools | 963 | 2024-04-05T05:42:55.007051 | MIT | true | ca58932d99024418531d5574948fdb75 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_libgroupby.cpython-313.pyc | test_libgroupby.cpython-313.pyc | Other | 20,533 | 0.95 | 0.011236 | 0 | react-lib | 945 | 2024-02-26T14:18:23.553675 | BSD-3-Clause | true | b1b23b5c5e03993d237c5d1f3939546f |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_missing.cpython-313.pyc | test_missing.cpython-313.pyc | Other | 9,159 | 0.8 | 0 | 0.007299 | python-kit | 10 | 2024-05-10T13:13:20.867018 | GPL-3.0 | true | eb174f8184372a3d6805b9ce5a0bf165 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_numba.cpython-313.pyc | test_numba.cpython-313.pyc | Other | 5,444 | 0.95 | 0 | 0 | python-kit | 23 | 2025-03-24T07:45:50.672995 | GPL-3.0 | true | 9df30fe63f4dd2f02b530640fda6ef97 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_numeric_only.cpython-313.pyc | test_numeric_only.cpython-313.pyc | Other | 18,472 | 0.95 | 0.028926 | 0 | python-kit | 522 | 2023-12-17T11:53:07.001884 | GPL-3.0 | true | 676a10b1d03f194c8e0060f1a8a9156f |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.