ZTWHHH committed on
Commit 4d9127d · verified · 1 Parent(s): 261779a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +3 -0
  2. videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so +3 -0
  3. videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so +3 -0
  4. videochat2/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so +3 -0
  5. videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/__init__.cpython-310.pyc +0 -0
  6. videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_constructors.cpython-310.pyc +0 -0
  7. videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_cumulative.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_reductions.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/__init__.py +25 -0
  10. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_bin_groupby.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py +0 -0
  13. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc +0 -0
  14. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc +0 -0
  15. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc +0 -0
  16. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc +0 -0
  17. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py +1523 -0
  19. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py +399 -0
  20. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py +242 -0
  21. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py +664 -0
  22. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/conftest.py +204 -0
  23. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_allowlist.py +326 -0
  24. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_any_all.py +189 -0
  25. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_api_consistency.py +142 -0
  26. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_apply.py +1341 -0
  27. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_apply_mutate.py +144 -0
  28. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_bin_groupby.py +65 -0
  29. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_categorical.py +2057 -0
  30. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_counting.py +377 -0
  31. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_filters.py +622 -0
  32. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_function.py +1637 -0
  33. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby.py +2837 -0
  34. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_dropna.py +684 -0
  35. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_shift_diff.py +156 -0
  36. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_subclass.py +105 -0
  37. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_grouping.py +1077 -0
  38. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_index_as_string.py +85 -0
  39. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_indexing.py +332 -0
  40. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_libgroupby.py +284 -0
  41. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_min_max.py +249 -0
  42. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_missing.py +154 -0
  43. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_nth.py +824 -0
  44. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_numba.py +73 -0
  45. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_nunique.py +197 -0
  46. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_pipe.py +80 -0
  47. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_quantile.py +470 -0
  48. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_raises.py +633 -0
  49. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_rank.py +698 -0
  50. videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_sample.py +154 -0
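For context, the commit message refers to the upload-large-folder helper from huggingface_hub. A minimal sketch of the kind of call that produces a commit like this one; the repo id and local path below are placeholders, not values taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()
    # Resumable, chunked upload of a large local tree; files matched by the
    # repo's .gitattributes LFS rules are stored as Git LFS objects.
    api.upload_large_folder(
        repo_id="user/repo",          # placeholder repo id
        repo_type="model",            # model / dataset / space, passed explicitly
        folder_path="./local_folder", # placeholder local path
    )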
.gitattributes CHANGED
@@ -1266,3 +1266,6 @@ videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.cpython-3
 videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/period.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc11828d0b7bf5167ff9ec490d0e3de327240d4d526261d9134ac9d9367bf26d
+size 953312
videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:071b85b2a109cbf283646ee8b6be23f4bc6c1b10a7cbfdc287293ae8ed514338
+size 231400
videochat2/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6bc87f3058b27f9a36b356d6c52f4384d84795253e0a6deb30355077196c556
+size 391064
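Each of the three .so diffs above adds a Git LFS pointer rather than the binary itself: a version line, the oid (the SHA-256 of the real file), and its size in bytes. A minimal sketch of reading those fields back out of a pointer file; the helper name is ours, not part of any library:

    def parse_lfs_pointer(text: str):
        # A pointer is a few "key value" lines; split each line only once so
        # the oid value ("sha256:<hex>") survives intact.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return fields["version"], fields["oid"], int(fields["size"])

For the offsets.cpython-310 pointer above, this returns the spec URL, "sha256:cc11828d...", and 953312.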
videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes)
videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_constructors.cpython-310.pyc ADDED
Binary file (7.3 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_cumulative.cpython-310.pyc ADDED
Binary file (1.35 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_reductions.cpython-310.pyc ADDED
Binary file (5.07 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/__init__.py ADDED
@@ -0,0 +1,25 @@
+def get_groupby_method_args(name, obj):
+    """
+    Get required arguments for a groupby method.
+
+    When parametrizing a test over groupby methods (e.g. "sum", "mean", "fillna"),
+    it is often the case that arguments are required for certain methods.
+
+    Parameters
+    ----------
+    name: str
+        Name of the method.
+    obj: Series or DataFrame
+        pandas object that is being grouped.
+
+    Returns
+    -------
+    A tuple of required arguments for the method.
+    """
+    if name in ("nth", "fillna", "take"):
+        return (0,)
+    if name == "quantile":
+        return (0.5,)
+    if name == "corrwith":
+        return (obj,)
+    return ()
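As a quick illustration of the helper this file adds, a test parametrized over groupby method names can splat the returned tuple into the call. The frame and method name below are made up for the example:

    import pandas as pd
    from pandas.tests.groupby import get_groupby_method_args

    df = pd.DataFrame({"key": [1, 1, 2], "val": [0.1, 0.4, 0.2]})
    method = "quantile"
    args = get_groupby_method_args(method, df)  # -> (0.5,)
    result = getattr(df.groupby("key")["val"], method)(*args)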
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_bin_groupby.cpython-310.pyc ADDED
Binary file (1.95 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_groupby.cpython-310.pyc ADDED
Binary file (80.9 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc ADDED
Binary file (46.9 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc ADDED
Binary file (9.63 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc ADDED
Binary file (7.86 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc ADDED
Binary file (19.3 kB)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py ADDED
@@ -0,0 +1,1523 @@
1
+ """
2
+ test .agg behavior / note that .apply is tested generally in test_groupby.py
3
+ """
4
+ import datetime
5
+ import functools
6
+ from functools import partial
7
+ import re
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from pandas.errors import SpecificationError
13
+
14
+ from pandas.core.dtypes.common import is_integer_dtype
15
+
16
+ import pandas as pd
17
+ from pandas import (
18
+ DataFrame,
19
+ Index,
20
+ MultiIndex,
21
+ Series,
22
+ concat,
23
+ to_datetime,
24
+ )
25
+ import pandas._testing as tm
26
+ from pandas.core.groupby.grouper import Grouping
27
+
28
+
29
+ def test_groupby_agg_no_extra_calls():
30
+ # GH#31760
31
+ df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
32
+ gb = df.groupby("key")["value"]
33
+
34
+ def dummy_func(x):
35
+ assert len(x) != 0
36
+ return x.sum()
37
+
38
+ gb.agg(dummy_func)
39
+
40
+
41
+ def test_agg_regression1(tsframe):
42
+ grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
43
+ result = grouped.agg(np.mean)
44
+ expected = grouped.mean()
45
+ tm.assert_frame_equal(result, expected)
46
+
47
+
48
+ def test_agg_must_agg(df):
49
+ grouped = df.groupby("A")["C"]
50
+
51
+ msg = "Must produce aggregated value"
52
+ with pytest.raises(Exception, match=msg):
53
+ grouped.agg(lambda x: x.describe())
54
+ with pytest.raises(Exception, match=msg):
55
+ grouped.agg(lambda x: x.index[:2])
56
+
57
+
58
+ def test_agg_ser_multi_key(df):
59
+ f = lambda x: x.sum()
60
+ results = df.C.groupby([df.A, df.B]).aggregate(f)
61
+ expected = df.groupby(["A", "B"]).sum()["C"]
62
+ tm.assert_series_equal(results, expected)
63
+
64
+
65
+ def test_groupby_aggregation_mixed_dtype():
66
+ # GH 6212
67
+ expected = DataFrame(
68
+ {
69
+ "v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
70
+ "v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
71
+ },
72
+ index=MultiIndex.from_tuples(
73
+ [
74
+ (1, 95),
75
+ (1, 99),
76
+ (2, 95),
77
+ (2, 99),
78
+ ("big", "damp"),
79
+ ("blue", "dry"),
80
+ ("red", "red"),
81
+ ("red", "wet"),
82
+ ],
83
+ names=["by1", "by2"],
84
+ ),
85
+ )
86
+
87
+ df = DataFrame(
88
+ {
89
+ "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
90
+ "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
91
+ "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
92
+ "by2": [
93
+ "wet",
94
+ "dry",
95
+ 99,
96
+ 95,
97
+ np.nan,
98
+ "damp",
99
+ 95,
100
+ 99,
101
+ "red",
102
+ 99,
103
+ np.nan,
104
+ np.nan,
105
+ ],
106
+ }
107
+ )
108
+
109
+ g = df.groupby(["by1", "by2"])
110
+ result = g[["v1", "v2"]].mean()
111
+ tm.assert_frame_equal(result, expected)
112
+
113
+
114
+ def test_groupby_aggregation_multi_level_column():
115
+ # GH 29772
116
+ lst = [
117
+ [True, True, True, False],
118
+ [True, False, np.nan, False],
119
+ [True, True, np.nan, False],
120
+ [True, True, np.nan, False],
121
+ ]
122
+ df = DataFrame(
123
+ data=lst,
124
+ columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
125
+ )
126
+
127
+ gb = df.groupby(level=1, axis=1)
128
+ result = gb.sum(numeric_only=False)
129
+ expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]})
130
+
131
+ tm.assert_frame_equal(result, expected)
132
+
133
+
134
+ def test_agg_apply_corner(ts, tsframe):
135
+ # nothing to group, all NA
136
+ grouped = ts.groupby(ts * np.nan, group_keys=False)
137
+ assert ts.dtype == np.float64
138
+
139
+ # groupby float64 values results in a float64 Index
140
+ exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
141
+ tm.assert_series_equal(grouped.sum(), exp)
142
+ tm.assert_series_equal(grouped.agg(np.sum), exp)
143
+ tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
144
+
145
+ # DataFrame
146
+ grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False)
147
+ exp_df = DataFrame(
148
+ columns=tsframe.columns,
149
+ dtype=float,
150
+ index=Index([], name="A", dtype=np.float64),
151
+ )
152
+ tm.assert_frame_equal(grouped.sum(), exp_df)
153
+ tm.assert_frame_equal(grouped.agg(np.sum), exp_df)
154
+ tm.assert_frame_equal(grouped.apply(np.sum), exp_df)
155
+
156
+
157
+ def test_agg_grouping_is_list_tuple(ts):
158
+ df = tm.makeTimeDataFrame()
159
+
160
+ grouped = df.groupby(lambda x: x.year)
161
+ grouper = grouped.grouper.groupings[0].grouping_vector
162
+ grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))
163
+
164
+ result = grouped.agg(np.mean)
165
+ expected = grouped.mean()
166
+ tm.assert_frame_equal(result, expected)
167
+
168
+ grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
169
+
170
+ result = grouped.agg(np.mean)
171
+ expected = grouped.mean()
172
+ tm.assert_frame_equal(result, expected)
173
+
174
+
175
+ def test_agg_python_multiindex(mframe):
176
+ grouped = mframe.groupby(["A", "B"])
177
+
178
+ result = grouped.agg(np.mean)
179
+ expected = grouped.mean()
180
+ tm.assert_frame_equal(result, expected)
181
+
182
+
183
+ @pytest.mark.parametrize(
184
+ "groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
185
+ )
186
+ def test_aggregate_str_func(tsframe, groupbyfunc):
187
+ grouped = tsframe.groupby(groupbyfunc)
188
+
189
+ # single series
190
+ result = grouped["A"].agg("std")
191
+ expected = grouped["A"].std()
192
+ tm.assert_series_equal(result, expected)
193
+
194
+ # group frame by function name
195
+ result = grouped.aggregate("var")
196
+ expected = grouped.var()
197
+ tm.assert_frame_equal(result, expected)
198
+
199
+ # group frame by function dict
200
+ result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
201
+ expected = DataFrame(
202
+ {
203
+ "A": grouped["A"].var(),
204
+ "B": grouped["B"].std(),
205
+ "C": grouped["C"].mean(),
206
+ "D": grouped["D"].sem(),
207
+ }
208
+ )
209
+ tm.assert_frame_equal(result, expected)
210
+
211
+
212
+ def test_std_masked_dtype(any_numeric_ea_dtype):
213
+ # GH#35516
214
+ df = DataFrame(
215
+ {
216
+ "a": [2, 1, 1, 1, 2, 2, 1],
217
+ "b": Series([pd.NA, 1, 2, 1, 1, 1, 2], dtype="Float64"),
218
+ }
219
+ )
220
+ result = df.groupby("a").std()
221
+ expected = DataFrame(
222
+ {"b": [0.57735, 0]}, index=Index([1, 2], name="a"), dtype="Float64"
223
+ )
224
+ tm.assert_frame_equal(result, expected)
225
+
226
+
227
+ def test_agg_str_with_kwarg_axis_1_raises(df, reduction_func):
228
+ gb = df.groupby(level=0)
229
+ if reduction_func in ("idxmax", "idxmin"):
230
+ error = TypeError
231
+ msg = "reduction operation '.*' not allowed for this dtype"
232
+ else:
233
+ error = ValueError
234
+ msg = f"Operation {reduction_func} does not support axis=1"
235
+ with pytest.raises(error, match=msg):
236
+ gb.agg(reduction_func, axis=1)
237
+
238
+
239
+ @pytest.mark.parametrize(
240
+ "func, expected, dtype, result_dtype_dict",
241
+ [
242
+ ("sum", [5, 7, 9], "int64", {}),
243
+ ("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}),
244
+ ("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}),
245
+ ("sum", [5, 7, 9], "Int64", {"j": "int64"}),
246
+ ("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),
247
+ ("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}),
248
+ ],
249
+ )
250
+ def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict):
251
+ # GH#43209
252
+ df = DataFrame(
253
+ [[1, 2, 3, 4, 5, 6]] * 3,
254
+ columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]),
255
+ ).astype({("a", "j"): dtype, ("b", "j"): dtype})
256
+ result = df.groupby(level=1, axis=1).agg(func)
257
+ expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(
258
+ result_dtype_dict
259
+ )
260
+
261
+ tm.assert_frame_equal(result, expected)
262
+
263
+
264
+ @pytest.mark.parametrize(
265
+ "func, expected_data, result_dtype_dict",
266
+ [
267
+ ("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}),
268
+ # std should ideally return Int64 / Float64 #43330
269
+ ("std", [[2**0.5] * 2] * 3, "float64"),
270
+ ("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}),
271
+ ],
272
+ )
273
+ def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):
274
+ # GH#43209
275
+ df = DataFrame(
276
+ np.arange(12).reshape(3, 4),
277
+ index=Index([0, 1, 0], name="y"),
278
+ columns=Index([10, 20, 10, 20], name="x"),
279
+ dtype="int64",
280
+ ).astype({10: "Int64"})
281
+ result = df.groupby("x", axis=1).agg(func)
282
+ expected = DataFrame(
283
+ data=expected_data,
284
+ index=Index([0, 1, 0], name="y"),
285
+ columns=Index([10, 20], name="x"),
286
+ ).astype(result_dtype_dict)
287
+ tm.assert_frame_equal(result, expected)
288
+
289
+
290
+ def test_aggregate_item_by_item(df):
291
+ grouped = df.groupby("A")
292
+
293
+ aggfun_0 = lambda ser: ser.size
294
+ result = grouped.agg(aggfun_0)
295
+ foosum = (df.A == "foo").sum()
296
+ barsum = (df.A == "bar").sum()
297
+ K = len(result.columns)
298
+
299
+ # GH5782
300
+ exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo")
301
+ tm.assert_series_equal(result.xs("foo"), exp)
302
+
303
+ exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar")
304
+ tm.assert_almost_equal(result.xs("bar"), exp)
305
+
306
+ def aggfun_1(ser):
307
+ return ser.size
308
+
309
+ result = DataFrame().groupby(df.A).agg(aggfun_1)
310
+ assert isinstance(result, DataFrame)
311
+ assert len(result) == 0
312
+
313
+
314
+ def test_wrap_agg_out(three_group):
315
+ grouped = three_group.groupby(["A", "B"])
316
+
317
+ def func(ser):
318
+ if ser.dtype == object:
319
+ raise TypeError("Test error message")
320
+ return ser.sum()
321
+
322
+ with pytest.raises(TypeError, match="Test error message"):
323
+ grouped.aggregate(func)
324
+ result = grouped[[c for c in three_group if c != "C"]].aggregate(func)
325
+ exp_grouped = three_group.loc[:, three_group.columns != "C"]
326
+ expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
327
+ tm.assert_frame_equal(result, expected)
328
+
329
+
330
+ def test_agg_multiple_functions_maintain_order(df):
331
+ # GH #610
332
+ funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
333
+ result = df.groupby("A")["C"].agg(funcs)
334
+ exp_cols = Index(["mean", "max", "min"])
335
+
336
+ tm.assert_index_equal(result.columns, exp_cols)
337
+
338
+
339
+ def test_agg_multiple_functions_same_name():
340
+ # GH 30880
341
+ df = DataFrame(
342
+ np.random.randn(1000, 3),
343
+ index=pd.date_range("1/1/2012", freq="S", periods=1000),
344
+ columns=["A", "B", "C"],
345
+ )
346
+ result = df.resample("3T").agg(
347
+ {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
348
+ )
349
+ expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
350
+ expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
351
+ expected_values = np.array(
352
+ [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
353
+ ).T
354
+ expected = DataFrame(
355
+ expected_values, columns=expected_columns, index=expected_index
356
+ )
357
+ tm.assert_frame_equal(result, expected)
358
+
359
+
360
+ def test_agg_multiple_functions_same_name_with_ohlc_present():
361
+ # GH 30880
362
+ # ohlc expands dimensions, so different test to the above is required.
363
+ df = DataFrame(
364
+ np.random.randn(1000, 3),
365
+ index=pd.date_range("1/1/2012", freq="S", periods=1000, name="dti"),
366
+ columns=Index(["A", "B", "C"], name="alpha"),
367
+ )
368
+ result = df.resample("3T").agg(
369
+ {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
370
+ )
371
+ expected_index = pd.date_range("1/1/2012", freq="3T", periods=6, name="dti")
372
+ expected_columns = MultiIndex.from_tuples(
373
+ [
374
+ ("A", "ohlc", "open"),
375
+ ("A", "ohlc", "high"),
376
+ ("A", "ohlc", "low"),
377
+ ("A", "ohlc", "close"),
378
+ ("A", "quantile", "A"),
379
+ ("A", "quantile", "A"),
380
+ ],
381
+ names=["alpha", None, None],
382
+ )
383
+ non_ohlc_expected_values = np.array(
384
+ [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
385
+ ).T
386
+ expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values])
387
+ expected = DataFrame(
388
+ expected_values, columns=expected_columns, index=expected_index
389
+ )
390
+ tm.assert_frame_equal(result, expected)
391
+
392
+
393
+ def test_multiple_functions_tuples_and_non_tuples(df):
394
+ # #1359
395
+ # Columns B and C would cause partial failure
396
+ df = df.drop(columns=["B", "C"])
397
+
398
+ funcs = [("foo", "mean"), "std"]
399
+ ex_funcs = [("foo", "mean"), ("std", "std")]
400
+
401
+ result = df.groupby("A")["D"].agg(funcs)
402
+ expected = df.groupby("A")["D"].agg(ex_funcs)
403
+ tm.assert_frame_equal(result, expected)
404
+
405
+ result = df.groupby("A").agg(funcs)
406
+ expected = df.groupby("A").agg(ex_funcs)
407
+ tm.assert_frame_equal(result, expected)
408
+
409
+
410
+ def test_more_flexible_frame_multi_function(df):
411
+ grouped = df.groupby("A")
412
+
413
+ exmean = grouped.agg({"C": np.mean, "D": np.mean})
414
+ exstd = grouped.agg({"C": np.std, "D": np.std})
415
+
416
+ expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
417
+ expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
418
+
419
+ d = {"C": [np.mean, np.std], "D": [np.mean, np.std]}
420
+ result = grouped.aggregate(d)
421
+
422
+ tm.assert_frame_equal(result, expected)
423
+
424
+ # be careful
425
+ result = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
426
+ expected = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
427
+ tm.assert_frame_equal(result, expected)
428
+
429
+ def numpymean(x):
430
+ return np.mean(x)
431
+
432
+ def numpystd(x):
433
+ return np.std(x, ddof=1)
434
+
435
+ # this uses column selection & renaming
436
+ msg = r"nested renamer is not supported"
437
+ with pytest.raises(SpecificationError, match=msg):
438
+ d = {"C": np.mean, "D": {"foo": np.mean, "bar": np.std}}
439
+ grouped.aggregate(d)
440
+
441
+ # But without renaming, these functions are OK
442
+ d = {"C": [np.mean], "D": [numpymean, numpystd]}
443
+ grouped.aggregate(d)
444
+
445
+
446
+ def test_multi_function_flexible_mix(df):
447
+ # GH #1268
448
+ grouped = df.groupby("A")
449
+
450
+ # Expected
451
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
452
+ # this uses column selection & renaming
453
+ msg = r"nested renamer is not supported"
454
+ with pytest.raises(SpecificationError, match=msg):
455
+ grouped.aggregate(d)
456
+
457
+ # Test 1
458
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
459
+ # this uses column selection & renaming
460
+ with pytest.raises(SpecificationError, match=msg):
461
+ grouped.aggregate(d)
462
+
463
+ # Test 2
464
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
465
+ # this uses column selection & renaming
466
+ with pytest.raises(SpecificationError, match=msg):
467
+ grouped.aggregate(d)
468
+
469
+
470
+ def test_groupby_agg_coercing_bools():
471
+ # issue 14873
472
+ dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
473
+ gp = dat.groupby("a")
474
+
475
+ index = Index([1, 2], name="a")
476
+
477
+ result = gp["b"].aggregate(lambda x: (x != 0).all())
478
+ expected = Series([False, True], index=index, name="b")
479
+ tm.assert_series_equal(result, expected)
480
+
481
+ result = gp["c"].aggregate(lambda x: x.isnull().all())
482
+ expected = Series([True, False], index=index, name="c")
483
+ tm.assert_series_equal(result, expected)
484
+
485
+
486
+ def test_groupby_agg_dict_with_getitem():
487
+ # issue 25471
488
+ dat = DataFrame({"A": ["A", "A", "B", "B", "B"], "B": [1, 2, 1, 1, 2]})
489
+ result = dat.groupby("A")[["B"]].agg({"B": "sum"})
490
+
491
+ expected = DataFrame({"B": [3, 4]}, index=["A", "B"]).rename_axis("A", axis=0)
492
+
493
+ tm.assert_frame_equal(result, expected)
494
+
495
+
496
+ @pytest.mark.parametrize(
497
+ "op",
498
+ [
499
+ lambda x: x.sum(),
500
+ lambda x: x.cumsum(),
501
+ lambda x: x.transform("sum"),
502
+ lambda x: x.transform("cumsum"),
503
+ lambda x: x.agg("sum"),
504
+ lambda x: x.agg("cumsum"),
505
+ ],
506
+ )
507
+ def test_bool_agg_dtype(op):
508
+ # GH 7001
509
+ # Bool sum aggregations result in int
510
+ df = DataFrame({"a": [1, 1], "b": [False, True]})
511
+ s = df.set_index("a")["b"]
512
+
513
+ result = op(df.groupby("a"))["b"].dtype
514
+ assert is_integer_dtype(result)
515
+
516
+ result = op(s.groupby("a")).dtype
517
+ assert is_integer_dtype(result)
518
+
519
+
520
+ @pytest.mark.parametrize(
521
+ "keys, agg_index",
522
+ [
523
+ (["a"], Index([1], name="a")),
524
+ (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
525
+ ],
526
+ )
527
+ @pytest.mark.parametrize(
528
+ "input_dtype", ["bool", "int32", "int64", "float32", "float64"]
529
+ )
530
+ @pytest.mark.parametrize(
531
+ "result_dtype", ["bool", "int32", "int64", "float32", "float64"]
532
+ )
533
+ @pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
534
+ def test_callable_result_dtype_frame(
535
+ keys, agg_index, input_dtype, result_dtype, method
536
+ ):
537
+ # GH 21240
538
+ df = DataFrame({"a": [1], "b": [2], "c": [True]})
539
+ df["c"] = df["c"].astype(input_dtype)
540
+ op = getattr(df.groupby(keys)[["c"]], method)
541
+ result = op(lambda x: x.astype(result_dtype).iloc[0])
542
+ expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
543
+ expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype(
544
+ result_dtype
545
+ )
546
+ if method == "apply":
547
+ expected.columns.names = [0]
548
+ tm.assert_frame_equal(result, expected)
549
+
550
+
551
+ @pytest.mark.parametrize(
552
+ "keys, agg_index",
553
+ [
554
+ (["a"], Index([1], name="a")),
555
+ (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
556
+ ],
557
+ )
558
+ @pytest.mark.parametrize("input", [True, 1, 1.0])
559
+ @pytest.mark.parametrize("dtype", [bool, int, float])
560
+ @pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
561
+ def test_callable_result_dtype_series(keys, agg_index, input, dtype, method):
562
+ # GH 21240
563
+ df = DataFrame({"a": [1], "b": [2], "c": [input]})
564
+ op = getattr(df.groupby(keys)["c"], method)
565
+ result = op(lambda x: x.astype(dtype).iloc[0])
566
+ expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
567
+ expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype)
568
+ tm.assert_series_equal(result, expected)
569
+
570
+
571
+ def test_order_aggregate_multiple_funcs():
572
+ # GH 25692
573
+ df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
574
+
575
+ res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
576
+ result = res.columns.levels[1]
577
+
578
+ expected = Index(["sum", "max", "mean", "ohlc", "min"])
579
+
580
+ tm.assert_index_equal(result, expected)
581
+
582
+
583
+ def test_ohlc_ea_dtypes(any_numeric_ea_dtype):
584
+ # GH#37493
585
+ df = DataFrame(
586
+ {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]},
587
+ dtype=any_numeric_ea_dtype,
588
+ )
589
+ gb = df.groupby("a")
590
+ result = gb.ohlc()
591
+ expected = DataFrame(
592
+ [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4],
593
+ columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]),
594
+ index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"),
595
+ dtype=any_numeric_ea_dtype,
596
+ )
597
+ tm.assert_frame_equal(result, expected)
598
+
599
+ gb2 = df.groupby("a", as_index=False)
600
+ result2 = gb2.ohlc()
601
+ expected2 = expected.reset_index()
602
+ tm.assert_frame_equal(result2, expected2)
603
+
604
+
605
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64])
606
+ @pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
607
+ def test_uint64_type_handling(dtype, how):
608
+ # GH 26310
609
+ df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})
610
+ expected = df.groupby("y").agg({"x": how})
611
+ df.x = df.x.astype(dtype)
612
+ result = df.groupby("y").agg({"x": how})
613
+ if how not in ("mean", "median"):
614
+ # mean and median always result in floats
615
+ result.x = result.x.astype(np.int64)
616
+ tm.assert_frame_equal(result, expected, check_exact=True)
617
+
618
+
619
+ def test_func_duplicates_raises():
620
+ # GH28426
621
+ msg = "Function names"
622
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
623
+ with pytest.raises(SpecificationError, match=msg):
624
+ df.groupby("A").agg(["min", "min"])
625
+
626
+
627
+ @pytest.mark.parametrize(
628
+ "index",
629
+ [
630
+ pd.CategoricalIndex(list("abc")),
631
+ pd.interval_range(0, 3),
632
+ pd.period_range("2020", periods=3, freq="D"),
633
+ MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
634
+ ],
635
+ )
636
+ def test_agg_index_has_complex_internals(index):
637
+ # GH 31223
638
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
639
+ result = df.groupby("group").agg({"value": Series.nunique})
640
+ expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
641
+ tm.assert_frame_equal(result, expected)
642
+
643
+
644
+ def test_agg_split_block():
645
+ # https://github.com/pandas-dev/pandas/issues/31522
646
+ df = DataFrame(
647
+ {
648
+ "key1": ["a", "a", "b", "b", "a"],
649
+ "key2": ["one", "two", "one", "two", "one"],
650
+ "key3": ["three", "three", "three", "six", "six"],
651
+ }
652
+ )
653
+ result = df.groupby("key1").min()
654
+ expected = DataFrame(
655
+ {"key2": ["one", "one"], "key3": ["six", "six"]},
656
+ index=Index(["a", "b"], name="key1"),
657
+ )
658
+ tm.assert_frame_equal(result, expected)
659
+
660
+
661
+ def test_agg_split_object_part_datetime():
662
+ # https://github.com/pandas-dev/pandas/pull/31616
663
+ df = DataFrame(
664
+ {
665
+ "A": pd.date_range("2000", periods=4),
666
+ "B": ["a", "b", "c", "d"],
667
+ "C": [1, 2, 3, 4],
668
+ "D": ["b", "c", "d", "e"],
669
+ "E": pd.date_range("2000", periods=4),
670
+ "F": [1, 2, 3, 4],
671
+ }
672
+ ).astype(object)
673
+ result = df.groupby([0, 0, 0, 0]).min()
674
+ expected = DataFrame(
675
+ {
676
+ "A": [pd.Timestamp("2000")],
677
+ "B": ["a"],
678
+ "C": [1],
679
+ "D": ["b"],
680
+ "E": [pd.Timestamp("2000")],
681
+ "F": [1],
682
+ },
683
+ index=np.array([0]),
684
+ dtype=object,
685
+ )
686
+ tm.assert_frame_equal(result, expected)
687
+
688
+
689
+ class TestNamedAggregationSeries:
690
+ def test_series_named_agg(self):
691
+ df = Series([1, 2, 3, 4])
692
+ gr = df.groupby([0, 0, 1, 1])
693
+ result = gr.agg(a="sum", b="min")
694
+ expected = DataFrame(
695
+ {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=np.array([0, 1])
696
+ )
697
+ tm.assert_frame_equal(result, expected)
698
+
699
+ result = gr.agg(b="min", a="sum")
700
+ expected = expected[["b", "a"]]
701
+ tm.assert_frame_equal(result, expected)
702
+
703
+ def test_no_args_raises(self):
704
+ gr = Series([1, 2]).groupby([0, 1])
705
+ with pytest.raises(TypeError, match="Must provide"):
706
+ gr.agg()
707
+
708
+ # but we do allow this
709
+ result = gr.agg([])
710
+ expected = DataFrame(columns=[])
711
+ tm.assert_frame_equal(result, expected)
712
+
713
+ def test_series_named_agg_duplicates_no_raises(self):
714
+ # GH28426
715
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
716
+ grouped = gr.agg(a="sum", b="sum")
717
+ expected = DataFrame({"a": [3, 3], "b": [3, 3]}, index=np.array([0, 1]))
718
+ tm.assert_frame_equal(expected, grouped)
719
+
720
+ def test_mangled(self):
721
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
722
+ result = gr.agg(a=lambda x: 0, b=lambda x: 1)
723
+ expected = DataFrame({"a": [0, 0], "b": [1, 1]}, index=np.array([0, 1]))
724
+ tm.assert_frame_equal(result, expected)
725
+
726
+ @pytest.mark.parametrize(
727
+ "inp",
728
+ [
729
+ pd.NamedAgg(column="anything", aggfunc="min"),
730
+ ("anything", "min"),
731
+ ["anything", "min"],
732
+ ],
733
+ )
734
+ def test_named_agg_nametuple(self, inp):
735
+ # GH34422
736
+ s = Series([1, 1, 2, 2, 3, 3, 4, 5])
737
+ msg = f"func is expected but received {type(inp).__name__}"
738
+ with pytest.raises(TypeError, match=msg):
739
+ s.groupby(s.values).agg(a=inp)
740
+
741
+
742
+ class TestNamedAggregationDataFrame:
743
+ def test_agg_relabel(self):
744
+ df = DataFrame(
745
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
746
+ )
747
+ result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
748
+ expected = DataFrame(
749
+ {"a_max": [1, 3], "b_max": [6, 8]},
750
+ index=Index(["a", "b"], name="group"),
751
+ columns=["a_max", "b_max"],
752
+ )
753
+ tm.assert_frame_equal(result, expected)
754
+
755
+ # order invariance
756
+ p98 = functools.partial(np.percentile, q=98)
757
+ result = df.groupby("group").agg(
758
+ b_min=("B", "min"),
759
+ a_min=("A", min),
760
+ a_mean=("A", np.mean),
761
+ a_max=("A", "max"),
762
+ b_max=("B", "max"),
763
+ a_98=("A", p98),
764
+ )
765
+ expected = DataFrame(
766
+ {
767
+ "b_min": [5, 7],
768
+ "a_min": [0, 2],
769
+ "a_mean": [0.5, 2.5],
770
+ "a_max": [1, 3],
771
+ "b_max": [6, 8],
772
+ "a_98": [0.98, 2.98],
773
+ },
774
+ index=Index(["a", "b"], name="group"),
775
+ columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
776
+ )
777
+ tm.assert_frame_equal(result, expected)
778
+
779
+ def test_agg_relabel_non_identifier(self):
780
+ df = DataFrame(
781
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
782
+ )
783
+
784
+ result = df.groupby("group").agg(**{"my col": ("A", "max")})
785
+ expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))
786
+ tm.assert_frame_equal(result, expected)
787
+
788
+ def test_duplicate_no_raises(self):
789
+ # GH 28426, if use same input function on same column,
790
+ # no error should raise
791
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
792
+
793
+ grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
794
+ expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))
795
+ tm.assert_frame_equal(grouped, expected)
796
+
797
+ quant50 = functools.partial(np.percentile, q=50)
798
+ quant70 = functools.partial(np.percentile, q=70)
799
+ quant50.__name__ = "quant50"
800
+ quant70.__name__ = "quant70"
801
+
802
+ test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})
803
+
804
+ grouped = test.groupby("col1").agg(
805
+ quantile_50=("col2", quant50), quantile_70=("col2", quant70)
806
+ )
807
+ expected = DataFrame(
808
+ {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
809
+ index=Index(["a", "b"], name="col1"),
810
+ )
811
+ tm.assert_frame_equal(grouped, expected)
812
+
813
+ def test_agg_relabel_with_level(self):
814
+ df = DataFrame(
815
+ {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
816
+ index=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
817
+ )
818
+ result = df.groupby(level=0).agg(
819
+ aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
820
+ )
821
+ expected = DataFrame(
822
+ {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
823
+ )
824
+ tm.assert_frame_equal(result, expected)
825
+
826
+ def test_agg_relabel_other_raises(self):
827
+ df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
828
+ grouped = df.groupby("A")
829
+ match = "Must provide"
830
+ with pytest.raises(TypeError, match=match):
831
+ grouped.agg(foo=1)
832
+
833
+ with pytest.raises(TypeError, match=match):
834
+ grouped.agg()
835
+
836
+ with pytest.raises(TypeError, match=match):
837
+ grouped.agg(a=("B", "max"), b=(1, 2, 3))
838
+
839
+ def test_missing_raises(self):
840
+ df = DataFrame({"A": [0, 1], "B": [1, 2]})
841
+ match = re.escape("Column(s) ['C'] do not exist")
842
+ with pytest.raises(KeyError, match=match):
843
+ df.groupby("A").agg(c=("C", "sum"))
844
+
845
+ def test_agg_namedtuple(self):
846
+ df = DataFrame({"A": [0, 1], "B": [1, 2]})
847
+ result = df.groupby("A").agg(
848
+ b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
849
+ )
850
+ expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
851
+ tm.assert_frame_equal(result, expected)
852
+
853
+ def test_mangled(self):
854
+ df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
855
+ result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
856
+ expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))
857
+ tm.assert_frame_equal(result, expected)
858
+
859
+
860
+ @pytest.mark.parametrize(
861
+ "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
862
+ [
863
+ (
864
+ (("y", "A"), "max"),
865
+ (("y", "A"), np.min),
866
+ (("y", "B"), "mean"),
867
+ [1, 3],
868
+ [0, 2],
869
+ [5.5, 7.5],
870
+ ),
871
+ (
872
+ (("y", "A"), lambda x: max(x)),
873
+ (("y", "A"), lambda x: 1),
874
+ (("y", "B"), "mean"),
875
+ [1, 3],
876
+ [1, 1],
877
+ [5.5, 7.5],
878
+ ),
879
+ (
880
+ pd.NamedAgg(("y", "A"), "max"),
881
+ pd.NamedAgg(("y", "B"), np.mean),
882
+ pd.NamedAgg(("y", "A"), lambda x: 1),
883
+ [1, 3],
884
+ [5.5, 7.5],
885
+ [1, 1],
886
+ ),
887
+ ],
888
+ )
889
+ def test_agg_relabel_multiindex_column(
890
+ agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
891
+ ):
892
+ # GH 29422, add tests for multiindex column cases
893
+ df = DataFrame(
894
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
895
+ )
896
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
897
+ idx = Index(["a", "b"], name=("x", "group"))
898
+
899
+ result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
900
+ expected = DataFrame({"a_max": [1, 3]}, index=idx)
901
+ tm.assert_frame_equal(result, expected)
902
+
903
+ result = df.groupby(("x", "group")).agg(
904
+ col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
905
+ )
906
+ expected = DataFrame(
907
+ {"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
908
+ )
909
+ tm.assert_frame_equal(result, expected)
910
+
911
+
912
+ def test_agg_relabel_multiindex_raises_not_exist():
913
+ # GH 29422, add test for raises scenario when aggregate column does not exist
914
+ df = DataFrame(
915
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
916
+ )
917
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
918
+
919
+ with pytest.raises(KeyError, match="do not exist"):
920
+ df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
921
+
922
+
923
+ def test_agg_relabel_multiindex_duplicates():
924
+ # GH29422, add test for raises scenario when getting duplicates
925
+ # GH28426, after this change, duplicates should also work if the relabelling is
926
+ # different
927
+ df = DataFrame(
928
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
929
+ )
930
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
931
+
932
+ result = df.groupby(("x", "group")).agg(
933
+ a=(("y", "A"), "min"), b=(("y", "A"), "min")
934
+ )
935
+ idx = Index(["a", "b"], name=("x", "group"))
936
+ expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)
937
+ tm.assert_frame_equal(result, expected)
938
+
939
+
940
+ @pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
941
+ def test_groupby_aggregate_empty_key(kwargs):
942
+ # GH: 32580
943
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
944
+ result = df.groupby("a").agg(kwargs)
945
+ expected = DataFrame(
946
+ [1, 4],
947
+ index=Index([1, 2], dtype="int64", name="a"),
948
+ columns=MultiIndex.from_tuples([["c", "min"]]),
949
+ )
950
+ tm.assert_frame_equal(result, expected)
951
+
952
+
953
+ def test_groupby_aggregate_empty_key_empty_return():
954
+ # GH: 32580 Check if everything works, when return is empty
955
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
956
+ result = df.groupby("a").agg({"b": []})
957
+ expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))
958
+ tm.assert_frame_equal(result, expected)
959
+
960
+
961
+ def test_groupby_aggregate_empty_with_multiindex_frame():
962
+ # GH 39178
963
+ df = DataFrame(columns=["a", "b", "c"])
964
+ result = df.groupby(["a", "b"], group_keys=False).agg(d=("c", list))
965
+ expected = DataFrame(
966
+ columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])
967
+ )
968
+ tm.assert_frame_equal(result, expected)
969
+
970
+
971
+ def test_grouby_agg_loses_results_with_as_index_false_relabel():
972
+ # GH 32240: When the aggregate function relabels column names and
973
+ # as_index=False is specified, the results are dropped.
974
+
975
+ df = DataFrame(
976
+ {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
977
+ )
978
+
979
+ grouped = df.groupby("key", as_index=False)
980
+ result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
981
+ expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
982
+ tm.assert_frame_equal(result, expected)
983
+
984
+
985
+ def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():
986
+ # GH 32240: When the aggregate function relabels column names and
987
+ # as_index=False is specified, the results are dropped. Check if
988
+ # multiindex is returned in the right order
989
+
990
+ df = DataFrame(
991
+ {
992
+ "key": ["x", "y", "x", "y", "x", "x"],
993
+ "key1": ["a", "b", "c", "b", "a", "c"],
994
+ "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
995
+ }
996
+ )
997
+
998
+ grouped = df.groupby(["key", "key1"], as_index=False)
999
+ result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
1000
+ expected = DataFrame(
1001
+ {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
1002
+ )
1003
+ tm.assert_frame_equal(result, expected)
1004
+
1005
+
1006
+ @pytest.mark.parametrize(
1007
+ "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
1008
+ )
1009
+ def test_multiindex_custom_func(func):
1010
+ # GH 31777
1011
+ data = [[1, 4, 2], [5, 7, 1]]
1012
+ df = DataFrame(
1013
+ data,
1014
+ columns=MultiIndex.from_arrays(
1015
+ [[1, 1, 2], [3, 4, 3]], names=["Sisko", "Janeway"]
1016
+ ),
1017
+ )
1018
+ result = df.groupby(np.array([0, 1])).agg(func)
1019
+ expected_dict = {
1020
+ (1, 3): {0: 1.0, 1: 5.0},
1021
+ (1, 4): {0: 4.0, 1: 7.0},
1022
+ (2, 3): {0: 2.0, 1: 1.0},
1023
+ }
1024
+ expected = DataFrame(expected_dict, index=np.array([0, 1]), columns=df.columns)
1025
+ tm.assert_frame_equal(result, expected)
1026
+
1027
+
1028
+ def myfunc(s):
1029
+ return np.percentile(s, q=0.90)
1030
+
1031
+
1032
+ @pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
1033
+ def test_lambda_named_agg(func):
1034
+ # see gh-28467
1035
+ animals = DataFrame(
1036
+ {
1037
+ "kind": ["cat", "dog", "cat", "dog"],
1038
+ "height": [9.1, 6.0, 9.5, 34.0],
1039
+ "weight": [7.9, 7.5, 9.9, 198.0],
1040
+ }
1041
+ )
1042
+
1043
+ result = animals.groupby("kind").agg(
1044
+ mean_height=("height", "mean"), perc90=("height", func)
1045
+ )
1046
+ expected = DataFrame(
1047
+ [[9.3, 9.1036], [20.0, 6.252]],
1048
+ columns=["mean_height", "perc90"],
1049
+ index=Index(["cat", "dog"], name="kind"),
1050
+ )
1051
+
1052
+ tm.assert_frame_equal(result, expected)
1053
+
1054
+
1055
+ def test_aggregate_mixed_types():
1056
+ # GH 16916
1057
+ df = DataFrame(
1058
+ data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
1059
+ )
1060
+ df["grouping"] = ["group 1", "group 1", 2]
1061
+ result = df.groupby("grouping").aggregate(lambda x: x.tolist())
1062
+ expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
1063
+ expected = DataFrame(
1064
+ expected_data,
1065
+ index=Index([2, "group 1"], dtype="object", name="grouping"),
1066
+ columns=Index(["X", "Y", "Z"], dtype="object"),
1067
+ )
1068
+ tm.assert_frame_equal(result, expected)
1069
+
1070
+
1071
+ @pytest.mark.xfail(reason="Not implemented;see GH 31256")
1072
+ def test_aggregate_udf_na_extension_type():
1073
+ # https://github.com/pandas-dev/pandas/pull/31359
1074
+ # This is currently failing to cast back to Int64Dtype.
1075
+ # The presence of the NA causes two problems
1076
+ # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
1077
+ # 2. The presence of an NA forces object type, so the non-NA values is
1078
+ # a Python int rather than a NumPy int64. Python ints aren't
1079
+ # instances of numpy.int64.
1080
+ def aggfunc(x):
1081
+ if all(x > 2):
1082
+ return 1
1083
+ else:
1084
+ return pd.NA
1085
+
1086
+ df = DataFrame({"A": pd.array([1, 2, 3])})
1087
+ result = df.groupby([1, 1, 2]).agg(aggfunc)
1088
+ expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
1089
+ tm.assert_frame_equal(result, expected)
1090
+
1091
+
1092
+ class TestLambdaMangling:
1093
+ def test_basic(self):
1094
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
1095
+ result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
1096
+
1097
+ expected = DataFrame(
1098
+ {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
1099
+ index=Index([0, 1], name="A"),
1100
+ )
1101
+ tm.assert_frame_equal(result, expected)
1102
+
1103
+ def test_mangle_series_groupby(self):
1104
+ gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
1105
+ result = gr.agg([lambda x: 0, lambda x: 1])
1106
+ exp_data = {"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}
1107
+ expected = DataFrame(exp_data, index=np.array([0, 1]))
1108
+ tm.assert_frame_equal(result, expected)
1109
+
1110
+ @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
1111
+ def test_with_kwargs(self):
1112
+ f1 = lambda x, y, b=1: x.sum() + y + b
1113
+ f2 = lambda x, y, b=2: x.sum() + y * b
1114
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
1115
+ expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
1116
+ tm.assert_frame_equal(result, expected)
1117
+
1118
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
1119
+ expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
1120
+ tm.assert_frame_equal(result, expected)
1121
+
1122
+ def test_agg_with_one_lambda(self):
1123
+ # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
1124
+ df = DataFrame(
1125
+ {
1126
+ "kind": ["cat", "dog", "cat", "dog"],
1127
+ "height": [9.1, 6.0, 9.5, 34.0],
1128
+ "weight": [7.9, 7.5, 9.9, 198.0],
1129
+ }
1130
+ )
1131
+
1132
+ columns = ["height_sqr_min", "height_max", "weight_max"]
1133
+ expected = DataFrame(
1134
+ {
1135
+ "height_sqr_min": [82.81, 36.00],
1136
+ "height_max": [9.5, 34.0],
1137
+ "weight_max": [9.9, 198.0],
1138
+ },
1139
+ index=Index(["cat", "dog"], name="kind"),
1140
+ columns=columns,
1141
+ )
1142
+
1143
+ # check pd.NameAgg case
1144
+ result1 = df.groupby(by="kind").agg(
1145
+ height_sqr_min=pd.NamedAgg(
1146
+ column="height", aggfunc=lambda x: np.min(x**2)
1147
+ ),
1148
+ height_max=pd.NamedAgg(column="height", aggfunc="max"),
1149
+ weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
1150
+ )
1151
+ tm.assert_frame_equal(result1, expected)
1152
+
1153
+ # check agg(key=(col, aggfunc)) case
1154
+ result2 = df.groupby(by="kind").agg(
1155
+ height_sqr_min=("height", lambda x: np.min(x**2)),
1156
+ height_max=("height", "max"),
1157
+ weight_max=("weight", "max"),
1158
+ )
1159
+ tm.assert_frame_equal(result2, expected)
1160
+
1161
+ def test_agg_multiple_lambda(self):
1162
+ # GH25719, test for DataFrameGroupby.agg with multiple lambdas
1163
+ # with mixed aggfunc
1164
+ df = DataFrame(
1165
+ {
1166
+ "kind": ["cat", "dog", "cat", "dog"],
1167
+ "height": [9.1, 6.0, 9.5, 34.0],
1168
+ "weight": [7.9, 7.5, 9.9, 198.0],
1169
+ }
1170
+ )
1171
+ columns = [
1172
+ "height_sqr_min",
1173
+ "height_max",
1174
+ "weight_max",
1175
+ "height_max_2",
1176
+ "weight_min",
1177
+ ]
1178
+ expected = DataFrame(
1179
+ {
1180
+ "height_sqr_min": [82.81, 36.00],
1181
+ "height_max": [9.5, 34.0],
1182
+ "weight_max": [9.9, 198.0],
1183
+ "height_max_2": [9.5, 34.0],
1184
+ "weight_min": [7.9, 7.5],
1185
+ },
1186
+ index=Index(["cat", "dog"], name="kind"),
1187
+ columns=columns,
1188
+ )
1189
+
1190
+ # check agg(key=(col, aggfunc)) case
1191
+ result1 = df.groupby(by="kind").agg(
1192
+ height_sqr_min=("height", lambda x: np.min(x**2)),
1193
+ height_max=("height", "max"),
1194
+ weight_max=("weight", "max"),
1195
+ height_max_2=("height", lambda x: np.max(x)),
1196
+ weight_min=("weight", lambda x: np.min(x)),
1197
+ )
1198
+ tm.assert_frame_equal(result1, expected)
1199
+
1200
+ # check pd.NamedAgg case
1201
+ result2 = df.groupby(by="kind").agg(
1202
+ height_sqr_min=pd.NamedAgg(
1203
+ column="height", aggfunc=lambda x: np.min(x**2)
1204
+ ),
1205
+ height_max=pd.NamedAgg(column="height", aggfunc="max"),
1206
+ weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
1207
+ height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
1208
+ weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
1209
+ )
1210
+ tm.assert_frame_equal(result2, expected)
1211
+
1212
+
1213
+ def test_groupby_get_by_index():
1214
+ # GH 33439
1215
+ df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
1216
+ res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
1217
+ expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
1218
+ tm.assert_frame_equal(res, expected)
1219
+
1220
+
1221
+ @pytest.mark.parametrize(
1222
+ "grp_col_dict, exp_data",
1223
+ [
1224
+ ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
1225
+ ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
1226
+ ({"nr": "min"}, {"nr": [1, 5]}),
1227
+ ],
1228
+ )
1229
+ def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
1230
+ # test single aggregations on ordered categorical cols GHGH27800
1231
+
1232
+ # create the result dataframe
1233
+ input_df = DataFrame(
1234
+ {
1235
+ "nr": [1, 2, 3, 4, 5, 6, 7, 8],
1236
+ "cat_ord": list("aabbccdd"),
1237
+ "cat": list("aaaabbbb"),
1238
+ }
1239
+ )
1240
+
1241
+ input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
1242
+ input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
1243
+ result_df = input_df.groupby("cat").agg(grp_col_dict)
1244
+
1245
+ # create expected dataframe
1246
+ cat_index = pd.CategoricalIndex(
1247
+ ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
1248
+ )
1249
+
1250
+ expected_df = DataFrame(data=exp_data, index=cat_index)
1251
+
1252
+ if "cat_ord" in expected_df:
1253
+ # ordered categorical columns should be preserved
1254
+ dtype = input_df["cat_ord"].dtype
1255
+ expected_df["cat_ord"] = expected_df["cat_ord"].astype(dtype)
1256
+
1257
+ tm.assert_frame_equal(result_df, expected_df)
1258
+
1259
+
1260
+ @pytest.mark.parametrize(
1261
+ "grp_col_dict, exp_data",
1262
+ [
1263
+ ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
1264
+ ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
1265
+ ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
1266
+ ],
1267
+ )
1268
+ def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
1269
+ # test combined aggregations on ordered categorical cols GH27800
1270
+
1271
+ # create the result dataframe
1272
+ input_df = DataFrame(
1273
+ {
1274
+ "nr": [1, 2, 3, 4, 5, 6, 7, 8],
1275
+ "cat_ord": list("aabbccdd"),
1276
+ "cat": list("aaaabbbb"),
1277
+ }
1278
+ )
1279
+
1280
+ input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
1281
+ input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
1282
+ result_df = input_df.groupby("cat").agg(grp_col_dict)
1283
+
1284
+ # create expected dataframe
1285
+ cat_index = pd.CategoricalIndex(
1286
+ ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
1287
+ )
1288
+
1289
+ # unpack the grp_col_dict to create the multi-index tuple
1290
+ # this tuple will be used to create the expected dataframe index
1291
+ multi_index_list = []
1292
+ for k, v in grp_col_dict.items():
1293
+ if isinstance(v, list):
1294
+ for value in v:
1295
+ multi_index_list.append([k, value])
1296
+ else:
1297
+ multi_index_list.append([k, v])
1298
+ multi_index = MultiIndex.from_tuples(tuple(multi_index_list))
1299
+
1300
+ expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
1301
+ for col in expected_df.columns:
1302
+ if isinstance(col, tuple) and "cat_ord" in col:
1303
+ # ordered categorical should be preserved
1304
+ expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)
1305
+
1306
+ tm.assert_frame_equal(result_df, expected_df)
1307
+
1308
+
1309
+ def test_nonagg_agg():
1310
+ # GH 35490 - Single/Multiple agg of non-agg function gives same results
1311
+ # TODO: agg should raise for functions that don't aggregate
1312
+ df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
1313
+ g = df.groupby("a")
1314
+
1315
+ result = g.agg(["cumsum"])
1316
+ result.columns = result.columns.droplevel(-1)
1317
+ expected = g.agg("cumsum")
1318
+
1319
+ tm.assert_frame_equal(result, expected)
1320
+
1321
+
1322
+ def test_aggregate_datetime_objects():
1323
+ # https://github.com/pandas-dev/pandas/issues/36003
1324
+ # ensure we don't raise an error but keep object dtype for out-of-bounds
1325
+ # datetimes
1326
+ df = DataFrame(
1327
+ {
1328
+ "A": ["X", "Y"],
1329
+ "B": [
1330
+ datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
1331
+ datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
1332
+ ],
1333
+ }
1334
+ )
1335
+ result = df.groupby("A").B.max()
1336
+ expected = df.set_index("A")["B"]
1337
+ tm.assert_series_equal(result, expected)
1338
+
1339
+
1340
+ def test_groupby_index_object_dtype():
1341
+ # GH 40014
1342
+ df = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]})
1343
+ df.index = df.index.astype("O")
1344
+ grouped = df.groupby(["c0", "c1"])
1345
+ res = grouped.p.agg(lambda x: all(x > 0))
1346
+ # Check that providing a user-defined function in agg()
1347
+ # produces the correct index shape when using an object-typed index.
1348
+ expected_index = MultiIndex.from_tuples(
1349
+ [("x", "x"), ("x", "y")], names=("c0", "c1")
1350
+ )
1351
+ expected = Series([False, True], index=expected_index, name="p")
1352
+ tm.assert_series_equal(res, expected)
1353
+
1354
+
1355
+ def test_timeseries_groupby_agg():
1356
+ # GH#43290
1357
+
1358
+ def func(ser):
1359
+ if ser.isna().all():
1360
+ return None
1361
+ return np.sum(ser)
1362
+
1363
+ df = DataFrame([1.0], index=[pd.Timestamp("2018-01-16 00:00:00+00:00")])
1364
+ res = df.groupby(lambda x: 1).agg(func)
1365
+
1366
+ expected = DataFrame([[1.0]], index=[1])
1367
+ tm.assert_frame_equal(res, expected)
1368
+
1369
+
1370
+ def test_groupby_aggregate_directory(reduction_func):
1371
+ # GH#32793
1372
+ if reduction_func in ["corrwith", "nth"]:
1373
+ return None
1374
+
1375
+ obj = DataFrame([[0, 1], [0, np.nan]])
1376
+
1377
+ result_reduced_series = obj.groupby(0).agg(reduction_func)
1378
+ result_reduced_frame = obj.groupby(0).agg({1: reduction_func})
1379
+
1380
+ if reduction_func in ["size", "ngroup"]:
1381
+ # names are different: None / 1
1382
+ tm.assert_series_equal(
1383
+ result_reduced_series, result_reduced_frame[1], check_names=False
1384
+ )
1385
+ else:
1386
+ tm.assert_frame_equal(result_reduced_series, result_reduced_frame)
1387
+ tm.assert_series_equal(
1388
+ result_reduced_series.dtypes, result_reduced_frame.dtypes
1389
+ )
1390
+
1391
+
1392
+ def test_group_mean_timedelta_nat():
1393
+ # GH43132
1394
+ data = Series(["1 day", "3 days", "NaT"], dtype="timedelta64[ns]")
1395
+ expected = Series(["2 days"], dtype="timedelta64[ns]", index=np.array([0]))
1396
+
1397
+ result = data.groupby([0, 0, 0]).mean()
1398
+
1399
+ tm.assert_series_equal(result, expected)
1400
+
1401
+
1402
+ @pytest.mark.parametrize(
1403
+ "input_data, expected_output",
1404
+ [
1405
+ ( # no timezone
1406
+ ["2021-01-01T00:00", "NaT", "2021-01-01T02:00"],
1407
+ ["2021-01-01T01:00"],
1408
+ ),
1409
+ ( # timezone
1410
+ ["2021-01-01T00:00-0100", "NaT", "2021-01-01T02:00-0100"],
1411
+ ["2021-01-01T01:00-0100"],
1412
+ ),
1413
+ ],
1414
+ )
1415
+ def test_group_mean_datetime64_nat(input_data, expected_output):
1416
+ # GH43132
1417
+ data = to_datetime(Series(input_data))
1418
+ expected = to_datetime(Series(expected_output, index=np.array([0])))
1419
+
1420
+ result = data.groupby([0, 0, 0]).mean()
1421
+ tm.assert_series_equal(result, expected)
1422
+
1423
+
1424
+ @pytest.mark.parametrize(
1425
+ "func, output", [("mean", [8 + 18j, 10 + 22j]), ("sum", [40 + 90j, 50 + 110j])]
1426
+ )
1427
+ def test_groupby_complex(func, output):
1428
+ # GH#43701
1429
+ data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
1430
+ result = data.groupby(data.index % 2).agg(func)
1431
+ expected = Series(output)
1432
+ tm.assert_series_equal(result, expected)
1433
+
1434
+
1435
+ @pytest.mark.parametrize("func", ["min", "max", "var"])
1436
+ def test_groupby_complex_raises(func):
1437
+ # GH#43701
1438
+ data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
1439
+ msg = "No matching signature found"
1440
+ with pytest.raises(TypeError, match=msg):
1441
+ data.groupby(data.index % 2).agg(func)
1442
+
1443
+
1444
+ @pytest.mark.parametrize(
1445
+ "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}]
1446
+ )
1447
+ def test_multi_axis_1_raises(func):
1448
+ # GH#46995
1449
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]})
1450
+ gb = df.groupby("a", axis=1)
1451
+ with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"):
1452
+ gb.agg(func)
1453
+
1454
+
1455
+ @pytest.mark.parametrize(
1456
+ "test, constant",
1457
+ [
1458
+ ([[20, "A"], [20, "B"], [10, "C"]], {0: [10, 20], 1: ["C", ["A", "B"]]}),
1459
+ ([[20, "A"], [20, "B"], [30, "C"]], {0: [20, 30], 1: [["A", "B"], "C"]}),
1460
+ ([["a", 1], ["a", 1], ["b", 2], ["b", 3]], {0: ["a", "b"], 1: [1, [2, 3]]}),
1461
+ pytest.param(
1462
+ [["a", 1], ["a", 2], ["b", 3], ["b", 3]],
1463
+ {0: ["a", "b"], 1: [[1, 2], 3]},
1464
+ marks=pytest.mark.xfail,
1465
+ ),
1466
+ ],
1467
+ )
1468
+ def test_agg_of_mode_list(test, constant):
1469
+ # GH#25581
1470
+ df1 = DataFrame(test)
1471
+ result = df1.groupby(0).agg(Series.mode)
1472
+ # Mode usually only returns 1 value, but can return a list in the case of a tie.
1473
+
1474
+ expected = DataFrame(constant)
1475
+ expected = expected.set_index(0)
1476
+
1477
+ tm.assert_frame_equal(result, expected)
1478
+
1479
+
1480
+ def test__dataframe_groupy_agg_list_like_func_with_args():
1481
+ # GH 50624
1482
+ df = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
1483
+ gb = df.groupby("y")
1484
+
1485
+ def foo1(x, a=1, c=0):
1486
+ return x.sum() + a + c
1487
+
1488
+ def foo2(x, b=2, c=0):
1489
+ return x.sum() + b + c
1490
+
1491
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
1492
+ with pytest.raises(TypeError, match=msg):
1493
+ gb.agg([foo1, foo2], 3, b=3, c=4)
1494
+
1495
+ result = gb.agg([foo1, foo2], 3, c=4)
1496
+ expected = DataFrame(
1497
+ [[8, 8], [9, 9], [10, 10]],
1498
+ index=Index(["a", "b", "c"], name="y"),
1499
+ columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
1500
+ )
1501
+ tm.assert_frame_equal(result, expected)
1502
+
1503
+
1504
+ def test__series_groupy_agg_list_like_func_with_args():
1505
+ # GH 50624
1506
+ s = Series([1, 2, 3])
1507
+ sgb = s.groupby(s)
1508
+
1509
+ def foo1(x, a=1, c=0):
1510
+ return x.sum() + a + c
1511
+
1512
+ def foo2(x, b=2, c=0):
1513
+ return x.sum() + b + c
1514
+
1515
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
1516
+ with pytest.raises(TypeError, match=msg):
1517
+ sgb.agg([foo1, foo2], 3, b=3, c=4)
1518
+
1519
+ result = sgb.agg([foo1, foo2], 3, c=4)
1520
+ expected = DataFrame(
1521
+ [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]
1522
+ )
1523
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py ADDED
@@ -0,0 +1,399 @@
1
+ """
2
+ test cython .agg behavior
3
+ """
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas.core.dtypes.common import (
9
+ is_float_dtype,
10
+ is_integer_dtype,
11
+ )
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ Index,
17
+ NaT,
18
+ Series,
19
+ Timedelta,
20
+ Timestamp,
21
+ bdate_range,
22
+ )
23
+ import pandas._testing as tm
24
+
25
+
26
+ @pytest.mark.parametrize(
27
+ "op_name",
28
+ [
29
+ "count",
30
+ "sum",
31
+ "std",
32
+ "var",
33
+ "sem",
34
+ "mean",
35
+ pytest.param(
36
+ "median",
37
+ # ignore mean of empty slice
38
+ # and all-NaN
39
+ marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
40
+ ),
41
+ "prod",
42
+ "min",
43
+ "max",
44
+ ],
45
+ )
46
+ def test_cythonized_aggers(op_name):
47
+ data = {
48
+ "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
49
+ "B": ["A", "B"] * 6,
50
+ "C": np.random.randn(12),
51
+ }
52
+ df = DataFrame(data)
53
+ df.loc[2:10:2, "C"] = np.nan
54
+
55
+ op = lambda x: getattr(x, op_name)()
56
+
57
+ # single column
58
+ grouped = df.drop(["B"], axis=1).groupby("A")
59
+ exp = {cat: op(group["C"]) for cat, group in grouped}
60
+ exp = DataFrame({"C": exp})
61
+ exp.index.name = "A"
62
+ result = op(grouped)
63
+ tm.assert_frame_equal(result, exp)
64
+
65
+ # multiple columns
66
+ grouped = df.groupby(["A", "B"])
67
+ expd = {}
68
+ for (cat1, cat2), group in grouped:
69
+ expd.setdefault(cat1, {})[cat2] = op(group["C"])
70
+ exp = DataFrame(expd).T.stack(dropna=False)
71
+ exp.index.names = ["A", "B"]
72
+ exp.name = "C"
73
+
74
+ result = op(grouped)["C"]
75
+ if op_name in ["sum", "prod"]:
76
+ tm.assert_series_equal(result, exp)
77
+
78
+
79
+ def test_cython_agg_boolean():
80
+ frame = DataFrame(
81
+ {
82
+ "a": np.random.randint(0, 5, 50),
83
+ "b": np.random.randint(0, 2, 50).astype("bool"),
84
+ }
85
+ )
86
+ result = frame.groupby("a")["b"].mean()
87
+ expected = frame.groupby("a")["b"].agg(np.mean)
88
+
89
+ tm.assert_series_equal(result, expected)
90
+
91
+
92
+ def test_cython_agg_nothing_to_agg():
93
+ frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
94
+
95
+ msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
96
+ with pytest.raises(TypeError, match=msg):
97
+ frame.groupby("a")["b"].mean(numeric_only=True)
98
+
99
+ frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
100
+
101
+ result = frame[["b"]].groupby(frame["a"]).mean(numeric_only=True)
102
+ expected = DataFrame(
103
+ [], index=frame["a"].sort_values().drop_duplicates(), columns=[]
104
+ )
105
+ tm.assert_frame_equal(result, expected)
106
+
107
+
108
+ def test_cython_agg_nothing_to_agg_with_dates():
109
+ frame = DataFrame(
110
+ {
111
+ "a": np.random.randint(0, 5, 50),
112
+ "b": ["foo", "bar"] * 25,
113
+ "dates": pd.date_range("now", periods=50, freq="T"),
114
+ }
115
+ )
116
+ msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
117
+ with pytest.raises(TypeError, match=msg):
118
+ frame.groupby("b").dates.mean(numeric_only=True)
119
+
120
+
121
+ def test_cython_agg_frame_columns():
122
+ # #2113
123
+ df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})
124
+
125
+ df.groupby(level=0, axis="columns").mean()
126
+ df.groupby(level=0, axis="columns").mean()
127
+ df.groupby(level=0, axis="columns").mean()
128
+ df.groupby(level=0, axis="columns").mean()
129
+
130
+
131
+ def test_cython_agg_return_dict():
132
+ # GH 16741
133
+ df = DataFrame(
134
+ {
135
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
136
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
137
+ "C": np.random.randn(8),
138
+ "D": np.random.randn(8),
139
+ }
140
+ )
141
+
142
+ ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())
143
+ expected = Series(
144
+ [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
145
+ index=Index(["bar", "foo"], name="A"),
146
+ name="B",
147
+ )
148
+ tm.assert_series_equal(ts, expected)
149
+
150
+
151
+ def test_cython_fail_agg():
152
+ dr = bdate_range("1/1/2000", periods=50)
153
+ ts = Series(["A", "B", "C", "D", "E"] * 10, index=dr)
154
+
155
+ grouped = ts.groupby(lambda x: x.month)
156
+ summed = grouped.sum()
157
+ expected = grouped.agg(np.sum)
158
+ tm.assert_series_equal(summed, expected)
159
+
160
+
161
+ @pytest.mark.parametrize(
162
+ "op, targop",
163
+ [
164
+ ("mean", np.mean),
165
+ ("median", np.median),
166
+ ("var", np.var),
167
+ ("sum", np.sum),
168
+ ("prod", np.prod),
169
+ ("min", np.min),
170
+ ("max", np.max),
171
+ ("first", lambda x: x.iloc[0]),
172
+ ("last", lambda x: x.iloc[-1]),
173
+ ],
174
+ )
175
+ def test__cython_agg_general(op, targop):
176
+ df = DataFrame(np.random.randn(1000))
177
+ labels = np.random.randint(0, 50, size=1000).astype(float)
178
+
179
+ result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True)
180
+ expected = df.groupby(labels).agg(targop)
181
+ tm.assert_frame_equal(result, expected)
182
+
183
+
184
+ @pytest.mark.parametrize(
185
+ "op, targop",
186
+ [
187
+ ("mean", np.mean),
188
+ ("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
189
+ ("var", lambda x: np.var(x, ddof=1)),
190
+ ("min", np.min),
191
+ ("max", np.max),
192
+ ],
193
+ )
194
+ def test_cython_agg_empty_buckets(op, targop, observed):
195
+ df = DataFrame([11, 12, 13])
196
+ grps = range(0, 55, 5)
197
+
198
+ # calling _cython_agg_general directly, instead of via the user API
199
+ # which sets different values for min_count, so do that here.
200
+ g = df.groupby(pd.cut(df[0], grps), observed=observed)
201
+ result = g._cython_agg_general(op, alt=None, numeric_only=True)
202
+
203
+ g = df.groupby(pd.cut(df[0], grps), observed=observed)
204
+ expected = g.agg(lambda x: targop(x))
205
+ tm.assert_frame_equal(result, expected)
206
+
207
+
208
+ def test_cython_agg_empty_buckets_nanops(observed):
209
+ # GH-18869 can't call nanops on empty groups, so hardcode expected
210
+ # for these
211
+ df = DataFrame([11, 12, 13], columns=["a"])
212
+ grps = np.arange(0, 25, 5, dtype=np.int_)
213
+ # add / sum
214
+ result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
215
+ "sum", alt=None, numeric_only=True
216
+ )
217
+ intervals = pd.interval_range(0, 20, freq=5)
218
+ expected = DataFrame(
219
+ {"a": [0, 0, 36, 0]},
220
+ index=pd.CategoricalIndex(intervals, name="a", ordered=True),
221
+ )
222
+ if observed:
223
+ expected = expected[expected.a != 0]
224
+
225
+ tm.assert_frame_equal(result, expected)
226
+
227
+ # prod
228
+ result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
229
+ "prod", alt=None, numeric_only=True
230
+ )
231
+ expected = DataFrame(
232
+ {"a": [1, 1, 1716, 1]},
233
+ index=pd.CategoricalIndex(intervals, name="a", ordered=True),
234
+ )
235
+ if observed:
236
+ expected = expected[expected.a != 1]
237
+
238
+ tm.assert_frame_equal(result, expected)
239
+
240
+
241
+ @pytest.mark.parametrize("op", ["first", "last", "max", "min"])
242
+ @pytest.mark.parametrize(
243
+ "data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
244
+ )
245
+ def test_cython_with_timestamp_and_nat(op, data):
246
+ # https://github.com/pandas-dev/pandas/issues/19526
247
+ df = DataFrame({"a": [0, 1], "b": [data, NaT]})
248
+ index = Index([0, 1], name="a")
249
+
250
+ # We will group by a and test the cython aggregations
251
+ expected = DataFrame({"b": [data, NaT]}, index=index)
252
+
253
+ result = df.groupby("a").aggregate(op)
254
+ tm.assert_frame_equal(expected, result)
255
+
256
+
257
+ @pytest.mark.parametrize(
258
+ "agg",
259
+ [
260
+ "min",
261
+ "max",
262
+ "count",
263
+ "sum",
264
+ "prod",
265
+ "var",
266
+ "mean",
267
+ "median",
268
+ "ohlc",
269
+ "cumprod",
270
+ "cumsum",
271
+ "shift",
272
+ "any",
273
+ "all",
274
+ "quantile",
275
+ "first",
276
+ "last",
277
+ "rank",
278
+ "cummin",
279
+ "cummax",
280
+ ],
281
+ )
282
+ def test_read_only_buffer_source_agg(agg):
283
+ # https://github.com/pandas-dev/pandas/issues/36014
284
+ df = DataFrame(
285
+ {
286
+ "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0],
287
+ "species": ["setosa", "setosa", "setosa", "setosa", "setosa"],
288
+ }
289
+ )
290
+ df._mgr.arrays[0].flags.writeable = False
291
+
292
+ result = df.groupby(["species"]).agg({"sepal_length": agg})
293
+ expected = df.copy().groupby(["species"]).agg({"sepal_length": agg})
294
+
295
+ tm.assert_equal(result, expected)
296
+
297
+
298
+ @pytest.mark.parametrize(
299
+ "op_name",
300
+ [
301
+ "count",
302
+ "sum",
303
+ "std",
304
+ "var",
305
+ "sem",
306
+ "mean",
307
+ "median",
308
+ "prod",
309
+ "min",
310
+ "max",
311
+ ],
312
+ )
313
+ def test_cython_agg_nullable_int(op_name):
314
+ # ensure that the cython-based aggregations don't fail for nullable dtype
315
+ # (eg https://github.com/pandas-dev/pandas/issues/37415)
316
+ df = DataFrame(
317
+ {
318
+ "A": ["A", "B"] * 5,
319
+ "B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"),
320
+ }
321
+ )
322
+ result = getattr(df.groupby("A")["B"], op_name)()
323
+ df2 = df.assign(B=df["B"].astype("float64"))
324
+ expected = getattr(df2.groupby("A")["B"], op_name)()
325
+
326
+ if op_name != "count":
327
+ # the result is not yet consistently using Int64/Float64 dtype,
328
+ # so for now just checking the values by casting to float
329
+ result = result.astype("float64")
330
+ tm.assert_series_equal(result, expected)
331
+
332
+
333
+ @pytest.mark.parametrize("with_na", [True, False])
334
+ @pytest.mark.parametrize(
335
+ "op_name, action",
336
+ [
337
+ # ("count", "always_int"),
338
+ ("sum", "large_int"),
339
+ # ("std", "always_float"),
340
+ ("var", "always_float"),
341
+ # ("sem", "always_float"),
342
+ ("mean", "always_float"),
343
+ ("median", "always_float"),
344
+ ("prod", "large_int"),
345
+ ("min", "preserve"),
346
+ ("max", "preserve"),
347
+ ("first", "preserve"),
348
+ ("last", "preserve"),
349
+ ],
350
+ )
351
+ @pytest.mark.parametrize(
352
+ "data",
353
+ [
354
+ pd.array([1, 2, 3, 4], dtype="Int64"),
355
+ pd.array([1, 2, 3, 4], dtype="Int8"),
356
+ pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"),
357
+ pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"),
358
+ pd.array([True, True, False, False], dtype="boolean"),
359
+ ],
360
+ )
361
+ def test_cython_agg_EA_known_dtypes(data, op_name, action, with_na):
362
+ if with_na:
363
+ data[3] = pd.NA
364
+
365
+ df = DataFrame({"key": ["a", "a", "b", "b"], "col": data})
366
+ grouped = df.groupby("key")
367
+
368
+ if action == "always_int":
369
+ # always Int64
370
+ expected_dtype = pd.Int64Dtype()
371
+ elif action == "large_int":
372
+ # for any int/bool use Int64, for float preserve dtype
373
+ if is_float_dtype(data.dtype):
374
+ expected_dtype = data.dtype
375
+ elif is_integer_dtype(data.dtype):
376
+ # match the numpy dtype we'd get with the non-nullable analogue
377
+ expected_dtype = data.dtype
378
+ else:
379
+ expected_dtype = pd.Int64Dtype()
380
+ elif action == "always_float":
381
+ # for any int/bool use Float64, for float preserve dtype
382
+ if is_float_dtype(data.dtype):
383
+ expected_dtype = data.dtype
384
+ else:
385
+ expected_dtype = pd.Float64Dtype()
386
+ elif action == "preserve":
387
+ expected_dtype = data.dtype
388
+
389
+ result = getattr(grouped, op_name)()
390
+ assert result["col"].dtype == expected_dtype
391
+
392
+ result = grouped.aggregate(op_name)
393
+ assert result["col"].dtype == expected_dtype
394
+
395
+ result = getattr(grouped["col"], op_name)()
396
+ assert result.dtype == expected_dtype
397
+
398
+ result = grouped["col"].aggregate(op_name)
399
+ assert result.dtype == expected_dtype
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py ADDED
@@ -0,0 +1,242 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.errors import NumbaUtilError
5
+ import pandas.util._test_decorators as td
6
+
7
+ from pandas import (
8
+ DataFrame,
9
+ Index,
10
+ NamedAgg,
11
+ Series,
12
+ option_context,
13
+ )
14
+ import pandas._testing as tm
15
+
16
+
17
+ @td.skip_if_no("numba")
18
+ def test_correct_function_signature():
19
+ def incorrect_function(x):
20
+ return sum(x) * 2.7
21
+
22
+ data = DataFrame(
23
+ {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
24
+ columns=["key", "data"],
25
+ )
26
+ with pytest.raises(NumbaUtilError, match="The first 2"):
27
+ data.groupby("key").agg(incorrect_function, engine="numba")
28
+
29
+ with pytest.raises(NumbaUtilError, match="The first 2"):
30
+ data.groupby("key")["data"].agg(incorrect_function, engine="numba")
31
+
32
+
33
+ @td.skip_if_no("numba")
34
+ def test_check_nopython_kwargs():
35
+ def incorrect_function(values, index):
36
+ return sum(values) * 2.7
37
+
38
+ data = DataFrame(
39
+ {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
40
+ columns=["key", "data"],
41
+ )
42
+ with pytest.raises(NumbaUtilError, match="numba does not support"):
43
+ data.groupby("key").agg(incorrect_function, engine="numba", a=1)
44
+
45
+ with pytest.raises(NumbaUtilError, match="numba does not support"):
46
+ data.groupby("key")["data"].agg(incorrect_function, engine="numba", a=1)
47
+
48
+
49
+ @td.skip_if_no("numba")
50
+ @pytest.mark.filterwarnings("ignore")
51
+ # Filter warnings when parallel=True and the function can't be parallelized by Numba
52
+ @pytest.mark.parametrize("jit", [True, False])
53
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
54
+ @pytest.mark.parametrize("as_index", [True, False])
55
+ def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
56
+ def func_numba(values, index):
57
+ return np.mean(values) * 2.7
58
+
59
+ if jit:
60
+ # Test accepted jitted functions
61
+ import numba
62
+
63
+ func_numba = numba.jit(func_numba)
64
+
65
+ data = DataFrame(
66
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
67
+ )
68
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
69
+ grouped = data.groupby(0, as_index=as_index)
70
+ if pandas_obj == "Series":
71
+ grouped = grouped[1]
72
+
73
+ result = grouped.agg(func_numba, engine="numba", engine_kwargs=engine_kwargs)
74
+ expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
75
+
76
+ tm.assert_equal(result, expected)
77
+
78
+
79
+ @td.skip_if_no("numba")
80
+ @pytest.mark.filterwarnings("ignore")
81
+ # Filter warnings when parallel=True and the function can't be parallelized by Numba
82
+ @pytest.mark.parametrize("jit", [True, False])
83
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
84
+ def test_cache(jit, pandas_obj, nogil, parallel, nopython):
85
+ # Test that the functions are cached correctly if we switch functions
86
+ def func_1(values, index):
87
+ return np.mean(values) - 3.4
88
+
89
+ def func_2(values, index):
90
+ return np.mean(values) * 2.7
91
+
92
+ if jit:
93
+ import numba
94
+
95
+ func_1 = numba.jit(func_1)
96
+ func_2 = numba.jit(func_2)
97
+
98
+ data = DataFrame(
99
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
100
+ )
101
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
102
+ grouped = data.groupby(0)
103
+ if pandas_obj == "Series":
104
+ grouped = grouped[1]
105
+
106
+ result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
107
+ expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
108
+ tm.assert_equal(result, expected)
109
+
110
+ # Add func_2 to the cache
111
+ result = grouped.agg(func_2, engine="numba", engine_kwargs=engine_kwargs)
112
+ expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
113
+ tm.assert_equal(result, expected)
114
+
115
+ # Retest func_1 which should use the cache
116
+ result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
117
+ expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
118
+ tm.assert_equal(result, expected)
119
+
120
+
121
+ @td.skip_if_no("numba")
122
+ def test_use_global_config():
123
+ def func_1(values, index):
124
+ return np.mean(values) - 3.4
125
+
126
+ data = DataFrame(
127
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
128
+ )
129
+ grouped = data.groupby(0)
130
+ expected = grouped.agg(func_1, engine="numba")
131
+ with option_context("compute.use_numba", True):
132
+ result = grouped.agg(func_1, engine=None)
133
+ tm.assert_frame_equal(expected, result)
134
+
135
+
136
+ @td.skip_if_no("numba")
137
+ @pytest.mark.parametrize(
138
+ "agg_func",
139
+ [
140
+ ["min", "max"],
141
+ "min",
142
+ {"B": ["min", "max"], "C": "sum"},
143
+ NamedAgg(column="B", aggfunc="min"),
144
+ ],
145
+ )
146
+ def test_multifunc_notimplimented(agg_func):
147
+ data = DataFrame(
148
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
149
+ )
150
+ grouped = data.groupby(0)
151
+ with pytest.raises(NotImplementedError, match="Numba engine can"):
152
+ grouped.agg(agg_func, engine="numba")
153
+
154
+ with pytest.raises(NotImplementedError, match="Numba engine can"):
155
+ grouped[1].agg(agg_func, engine="numba")
156
+
157
+
158
+ @td.skip_if_no("numba")
159
+ def test_args_not_cached():
160
+ # GH 41647
161
+ def sum_last(values, index, n):
162
+ return values[-n:].sum()
163
+
164
+ df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})
165
+ grouped_x = df.groupby("id")["x"]
166
+ result = grouped_x.agg(sum_last, 1, engine="numba")
167
+ expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id"))
168
+ tm.assert_series_equal(result, expected)
169
+
170
+ result = grouped_x.agg(sum_last, 2, engine="numba")
171
+ expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id"))
172
+ tm.assert_series_equal(result, expected)
173
+
174
+
175
+ @td.skip_if_no("numba")
176
+ def test_index_data_correctly_passed():
177
+ # GH 43133
178
+ def f(values, index):
179
+ return np.mean(index)
180
+
181
+ df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3])
182
+ result = df.groupby("group").aggregate(f, engine="numba")
183
+ expected = DataFrame(
184
+ [-1.5, -3.0], columns=["v"], index=Index(["A", "B"], name="group")
185
+ )
186
+ tm.assert_frame_equal(result, expected)
187
+
188
+
189
+ @td.skip_if_no("numba")
190
+ def test_engine_kwargs_not_cached():
191
+ # If the user passes a different set of engine_kwargs, don't return the same
192
+ # jitted function
193
+ nogil = True
194
+ parallel = False
195
+ nopython = True
196
+
197
+ def func_kwargs(values, index):
198
+ return nogil + parallel + nopython
199
+
200
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
201
+ df = DataFrame({"value": [0, 0, 0]})
202
+ result = df.groupby(level=0).aggregate(
203
+ func_kwargs, engine="numba", engine_kwargs=engine_kwargs
204
+ )
205
+ expected = DataFrame({"value": [2.0, 2.0, 2.0]})
206
+ tm.assert_frame_equal(result, expected)
207
+
208
+ nogil = False
209
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
210
+ result = df.groupby(level=0).aggregate(
211
+ func_kwargs, engine="numba", engine_kwargs=engine_kwargs
212
+ )
213
+ expected = DataFrame({"value": [1.0, 1.0, 1.0]})
214
+ tm.assert_frame_equal(result, expected)
215
+
216
+
217
+ @td.skip_if_no("numba")
218
+ @pytest.mark.filterwarnings("ignore")
219
+ def test_multiindex_one_key(nogil, parallel, nopython):
220
+ def numba_func(values, index):
221
+ return 1
222
+
223
+ df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
224
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
225
+ result = df.groupby("A").agg(
226
+ numba_func, engine="numba", engine_kwargs=engine_kwargs
227
+ )
228
+ expected = DataFrame([1.0], index=Index([1], name="A"), columns=["C"])
229
+ tm.assert_frame_equal(result, expected)
230
+
231
+
232
+ @td.skip_if_no("numba")
233
+ def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
234
+ def numba_func(values, index):
235
+ return 1
236
+
237
+ df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
238
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
239
+ with pytest.raises(NotImplementedError, match="More than 1 grouping labels"):
240
+ df.groupby(["A", "B"]).agg(
241
+ numba_func, engine="numba", engine_kwargs=engine_kwargs
242
+ )
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py ADDED
@@ -0,0 +1,664 @@
1
+ """
2
+ test all other .agg behavior
3
+ """
4
+
5
+ import datetime as dt
6
+ from functools import partial
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas.errors import SpecificationError
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ Index,
17
+ MultiIndex,
18
+ PeriodIndex,
19
+ Series,
20
+ date_range,
21
+ period_range,
22
+ )
23
+ import pandas._testing as tm
24
+
25
+ from pandas.io.formats.printing import pprint_thing
26
+
27
+
28
+ def test_agg_partial_failure_raises():
29
+ # GH#43741
30
+
31
+ df = DataFrame(
32
+ {
33
+ "data1": np.random.randn(5),
34
+ "data2": np.random.randn(5),
35
+ "key1": ["a", "a", "b", "b", "a"],
36
+ "key2": ["one", "two", "one", "two", "one"],
37
+ }
38
+ )
39
+ grouped = df.groupby("key1")
40
+
41
+ def peak_to_peak(arr):
42
+ return arr.max() - arr.min()
43
+
44
+ with pytest.raises(TypeError, match="unsupported operand type"):
45
+ grouped.agg([peak_to_peak])
46
+
47
+ with pytest.raises(TypeError, match="unsupported operand type"):
48
+ grouped.agg(peak_to_peak)
49
+
50
+
51
+ def test_agg_datetimes_mixed():
52
+ data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
53
+
54
+ df1 = DataFrame(
55
+ {
56
+ "key": [x[0] for x in data],
57
+ "date": [x[1] for x in data],
58
+ "value": [x[2] for x in data],
59
+ }
60
+ )
61
+
62
+ data = [
63
+ [
64
+ row[0],
65
+ (dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
66
+ row[2],
67
+ ]
68
+ for row in data
69
+ ]
70
+
71
+ df2 = DataFrame(
72
+ {
73
+ "key": [x[0] for x in data],
74
+ "date": [x[1] for x in data],
75
+ "value": [x[2] for x in data],
76
+ }
77
+ )
78
+
79
+ df1["weights"] = df1["value"] / df1["value"].sum()
80
+ gb1 = df1.groupby("date").aggregate(np.sum)
81
+
82
+ df2["weights"] = df1["value"] / df1["value"].sum()
83
+ gb2 = df2.groupby("date").aggregate(np.sum)
84
+
85
+ assert len(gb1) == len(gb2)
86
+
87
+
88
+ def test_agg_period_index():
89
+ prng = period_range("2012-1-1", freq="M", periods=3)
90
+ df = DataFrame(np.random.randn(3, 2), index=prng)
91
+ rs = df.groupby(level=0).sum()
92
+ assert isinstance(rs.index, PeriodIndex)
93
+
94
+ # GH 3579
95
+ index = period_range(start="1999-01", periods=5, freq="M")
96
+ s1 = Series(np.random.rand(len(index)), index=index)
97
+ s2 = Series(np.random.rand(len(index)), index=index)
98
+ df = DataFrame.from_dict({"s1": s1, "s2": s2})
99
+ grouped = df.groupby(df.index.month)
100
+ list(grouped)
101
+
102
+
103
+ def test_agg_dict_parameter_cast_result_dtypes():
104
+ # GH 12821
105
+
106
+ df = DataFrame(
107
+ {
108
+ "class": ["A", "A", "B", "B", "C", "C", "D", "D"],
109
+ "time": date_range("1/1/2011", periods=8, freq="H"),
110
+ }
111
+ )
112
+ df.loc[[0, 1, 2, 5], "time"] = None
113
+
114
+ # test for `first` function
115
+ exp = df.loc[[0, 3, 4, 6]].set_index("class")
116
+ grouped = df.groupby("class")
117
+ tm.assert_frame_equal(grouped.first(), exp)
118
+ tm.assert_frame_equal(grouped.agg("first"), exp)
119
+ tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
120
+ tm.assert_series_equal(grouped.time.first(), exp["time"])
121
+ tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
122
+
123
+ # test for `last` function
124
+ exp = df.loc[[0, 3, 4, 7]].set_index("class")
125
+ grouped = df.groupby("class")
126
+ tm.assert_frame_equal(grouped.last(), exp)
127
+ tm.assert_frame_equal(grouped.agg("last"), exp)
128
+ tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
129
+ tm.assert_series_equal(grouped.time.last(), exp["time"])
130
+ tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
131
+
132
+ # count
133
+ exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
134
+ tm.assert_series_equal(grouped.time.agg(len), exp)
135
+ tm.assert_series_equal(grouped.time.size(), exp)
136
+
137
+ exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
138
+ tm.assert_series_equal(grouped.time.count(), exp)
139
+
140
+
141
+ def test_agg_cast_results_dtypes():
142
+ # similar to GH12821
143
+ # xref #11444
144
+ u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
145
+ v = list("aaabbbbbbccd")
146
+ df = DataFrame({"X": v, "Y": u})
147
+
148
+ result = df.groupby("X")["Y"].agg(len)
149
+ expected = df.groupby("X")["Y"].count()
150
+ tm.assert_series_equal(result, expected)
151
+
152
+
153
+ def test_aggregate_float64_no_int64():
154
+ # see gh-11199
155
+ df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
156
+
157
+ expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
158
+ expected.index.name = "b"
159
+
160
+ result = df.groupby("b")[["a"]].mean()
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+ expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
164
+ expected.index.name = "b"
165
+
166
+ result = df.groupby("b")[["a", "c"]].mean()
167
+ tm.assert_frame_equal(result, expected)
168
+
169
+
170
+ def test_aggregate_api_consistency():
171
+ # GH 9052
172
+ # make sure that the aggregates via dict
173
+ # are consistent
174
+ df = DataFrame(
175
+ {
176
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
177
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
178
+ "C": np.random.randn(8) + 1.0,
179
+ "D": np.arange(8),
180
+ }
181
+ )
182
+
183
+ grouped = df.groupby(["A", "B"])
184
+ c_mean = grouped["C"].mean()
185
+ c_sum = grouped["C"].sum()
186
+ d_mean = grouped["D"].mean()
187
+ d_sum = grouped["D"].sum()
188
+
189
+ result = grouped["D"].agg(["sum", "mean"])
190
+ expected = pd.concat([d_sum, d_mean], axis=1)
191
+ expected.columns = ["sum", "mean"]
192
+ tm.assert_frame_equal(result, expected, check_like=True)
193
+
194
+ result = grouped.agg([np.sum, np.mean])
195
+ expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
196
+ expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
197
+ tm.assert_frame_equal(result, expected, check_like=True)
198
+
199
+ result = grouped[["D", "C"]].agg([np.sum, np.mean])
200
+ expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
201
+ expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
202
+ tm.assert_frame_equal(result, expected, check_like=True)
203
+
204
+ result = grouped.agg({"C": "mean", "D": "sum"})
205
+ expected = pd.concat([d_sum, c_mean], axis=1)
206
+ tm.assert_frame_equal(result, expected, check_like=True)
207
+
208
+ result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
209
+ expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
210
+ expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
211
+
212
+ msg = r"Column\(s\) \['r', 'r2'\] do not exist"
213
+ with pytest.raises(KeyError, match=msg):
214
+ grouped[["D", "C"]].agg({"r": np.sum, "r2": np.mean})
215
+
216
+
217
+ def test_agg_dict_renaming_deprecation():
218
+ # 15931
219
+ df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
220
+
221
+ msg = r"nested renamer is not supported"
222
+ with pytest.raises(SpecificationError, match=msg):
223
+ df.groupby("A").agg(
224
+ {"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
225
+ )
226
+
227
+ msg = r"Column\(s\) \['ma'\] do not exist"
228
+ with pytest.raises(KeyError, match=msg):
229
+ df.groupby("A")[["B", "C"]].agg({"ma": "max"})
230
+
231
+ msg = r"nested renamer is not supported"
232
+ with pytest.raises(SpecificationError, match=msg):
233
+ df.groupby("A").B.agg({"foo": "count"})
234
+
235
+
236
+ def test_agg_compat():
237
+ # GH 12334
238
+ df = DataFrame(
239
+ {
240
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
241
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
242
+ "C": np.random.randn(8) + 1.0,
243
+ "D": np.arange(8),
244
+ }
245
+ )
246
+
247
+ g = df.groupby(["A", "B"])
248
+
249
+ msg = r"nested renamer is not supported"
250
+ with pytest.raises(SpecificationError, match=msg):
251
+ g["D"].agg({"C": ["sum", "std"]})
252
+
253
+ with pytest.raises(SpecificationError, match=msg):
254
+ g["D"].agg({"C": "sum", "D": "std"})
255
+
256
+
257
+ def test_agg_nested_dicts():
258
+ # API change for disallowing these types of nested dicts
259
+ df = DataFrame(
260
+ {
261
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
262
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
263
+ "C": np.random.randn(8) + 1.0,
264
+ "D": np.arange(8),
265
+ }
266
+ )
267
+
268
+ g = df.groupby(["A", "B"])
269
+
270
+ msg = r"nested renamer is not supported"
271
+ with pytest.raises(SpecificationError, match=msg):
272
+ g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
273
+
274
+ with pytest.raises(SpecificationError, match=msg):
275
+ g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
276
+
277
+ # same name as the original column
278
+ # GH9052
279
+ with pytest.raises(SpecificationError, match=msg):
280
+ g["D"].agg({"result1": np.sum, "result2": np.mean})
281
+
282
+ with pytest.raises(SpecificationError, match=msg):
283
+ g["D"].agg({"D": np.sum, "result2": np.mean})
284
+
285
+
286
+ def test_agg_item_by_item_raise_typeerror():
287
+ df = DataFrame(np.random.randint(10, size=(20, 10)))
288
+
289
+ def raiseException(df):
290
+ pprint_thing("----------------------------------------")
291
+ pprint_thing(df.to_string())
292
+ raise TypeError("test")
293
+
294
+ with pytest.raises(TypeError, match="test"):
295
+ df.groupby(0).agg(raiseException)
296
+
297
+
298
+ def test_series_agg_multikey():
299
+ ts = tm.makeTimeSeries()
300
+ grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
301
+
302
+ result = grouped.agg(np.sum)
303
+ expected = grouped.sum()
304
+ tm.assert_series_equal(result, expected)
305
+
306
+
307
+ def test_series_agg_multi_pure_python():
308
+ data = DataFrame(
309
+ {
310
+ "A": [
311
+ "foo",
312
+ "foo",
313
+ "foo",
314
+ "foo",
315
+ "bar",
316
+ "bar",
317
+ "bar",
318
+ "bar",
319
+ "foo",
320
+ "foo",
321
+ "foo",
322
+ ],
323
+ "B": [
324
+ "one",
325
+ "one",
326
+ "one",
327
+ "two",
328
+ "one",
329
+ "one",
330
+ "one",
331
+ "two",
332
+ "two",
333
+ "two",
334
+ "one",
335
+ ],
336
+ "C": [
337
+ "dull",
338
+ "dull",
339
+ "shiny",
340
+ "dull",
341
+ "dull",
342
+ "shiny",
343
+ "shiny",
344
+ "dull",
345
+ "shiny",
346
+ "shiny",
347
+ "shiny",
348
+ ],
349
+ "D": np.random.randn(11),
350
+ "E": np.random.randn(11),
351
+ "F": np.random.randn(11),
352
+ }
353
+ )
354
+
355
+ def bad(x):
356
+ assert len(x.values.base) > 0
357
+ return "foo"
358
+
359
+ result = data.groupby(["A", "B"]).agg(bad)
360
+ expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
361
+ tm.assert_frame_equal(result, expected)
362
+
363
+
364
+ def test_agg_consistency():
365
+ # agg with ([]) and () not consistent
366
+ # GH 6715
367
+ def P1(a):
368
+ return np.percentile(a.dropna(), q=1)
369
+
370
+ df = DataFrame(
371
+ {
372
+ "col1": [1, 2, 3, 4],
373
+ "col2": [10, 25, 26, 31],
374
+ "date": [
375
+ dt.date(2013, 2, 10),
376
+ dt.date(2013, 2, 10),
377
+ dt.date(2013, 2, 11),
378
+ dt.date(2013, 2, 11),
379
+ ],
380
+ }
381
+ )
382
+
383
+ g = df.groupby("date")
384
+
385
+ expected = g.agg([P1])
386
+ expected.columns = expected.columns.levels[0]
387
+
388
+ result = g.agg(P1)
389
+ tm.assert_frame_equal(result, expected)
390
+
391
+
392
+ def test_agg_callables():
393
+ # GH 7929
394
+ df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
395
+
396
+ class fn_class:
397
+ def __call__(self, x):
398
+ return sum(x)
399
+
400
+ equiv_callables = [
401
+ sum,
402
+ np.sum,
403
+ lambda x: sum(x),
404
+ lambda x: x.sum(),
405
+ partial(sum),
406
+ fn_class(),
407
+ ]
408
+
409
+ expected = df.groupby("foo").agg(sum)
410
+ for ecall in equiv_callables:
411
+ result = df.groupby("foo").agg(ecall)
412
+ tm.assert_frame_equal(result, expected)
413
+
414
+
415
+ def test_agg_over_numpy_arrays():
416
+ # GH 3788
417
+ df = DataFrame(
418
+ [
419
+ [1, np.array([10, 20, 30])],
420
+ [1, np.array([40, 50, 60])],
421
+ [2, np.array([20, 30, 40])],
422
+ ],
423
+ columns=["category", "arraydata"],
424
+ )
425
+ gb = df.groupby("category")
426
+
427
+ expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
428
+ expected_index = Index([1, 2], name="category")
429
+ expected_column = ["arraydata"]
430
+ expected = DataFrame(expected_data, index=expected_index, columns=expected_column)
431
+
432
+ alt = gb.sum(numeric_only=False)
433
+ tm.assert_frame_equal(alt, expected)
434
+
435
+ result = gb.agg("sum", numeric_only=False)
436
+ tm.assert_frame_equal(result, expected)
437
+
438
+ # FIXME: the original version of this test called `gb.agg(sum)`
439
+ # and that raises TypeError if `numeric_only=False` is passed
440
+
441
+
442
+ @pytest.mark.parametrize("as_period", [True, False])
443
+ def test_agg_tzaware_non_datetime_result(as_period):
444
+ # discussed in GH#29589, fixed in GH#29641, operating on tzaware values
445
+ # with function that is not dtype-preserving
446
+ dti = date_range("2012-01-01", periods=4, tz="UTC")
447
+ if as_period:
448
+ dti = dti.tz_localize(None).to_period("D")
449
+
450
+ df = DataFrame({"a": [0, 0, 1, 1], "b": dti})
451
+ gb = df.groupby("a")
452
+
453
+ # Case that _does_ preserve the dtype
454
+ result = gb["b"].agg(lambda x: x.iloc[0])
455
+ expected = Series(dti[::2], name="b")
456
+ expected.index.name = "a"
457
+ tm.assert_series_equal(result, expected)
458
+
459
+ # Cases that do _not_ preserve the dtype
460
+ result = gb["b"].agg(lambda x: x.iloc[0].year)
461
+ expected = Series([2012, 2012], name="b")
462
+ expected.index.name = "a"
463
+ tm.assert_series_equal(result, expected)
464
+
465
+ result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])
466
+ expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
467
+ expected.index.name = "a"
468
+ if as_period:
469
+ expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b")
470
+ expected.index.name = "a"
471
+ tm.assert_series_equal(result, expected)
472
+
473
+
474
+ def test_agg_timezone_round_trip():
475
+ # GH 15426
476
+ ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
477
+ df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})
478
+
479
+ result1 = df.groupby("a")["b"].agg(np.min).iloc[0]
480
+ result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
481
+ result3 = df.groupby("a")["b"].min().iloc[0]
482
+
483
+ assert result1 == ts
484
+ assert result2 == ts
485
+ assert result3 == ts
486
+
487
+ dates = [
488
+ pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)
489
+ ]
490
+ df = DataFrame({"A": ["a", "b"] * 2, "B": dates})
491
+ grouped = df.groupby("A")
492
+
493
+ ts = df["B"].iloc[0]
494
+ assert ts == grouped.nth(0)["B"].iloc[0]
495
+ assert ts == grouped.head(1)["B"].iloc[0]
496
+ assert ts == grouped.first()["B"].iloc[0]
497
+
498
+ # GH#27110 applying iloc should return a DataFrame
499
+ assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]
500
+
501
+ ts = df["B"].iloc[2]
502
+ assert ts == grouped.last()["B"].iloc[0]
503
+
504
+ # GH#27110 applying iloc should return a DataFrame
505
+ assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]
506
+
507
+
508
+ def test_sum_uint64_overflow():
509
+ # see gh-14758
510
+ # Convert to uint64 and don't overflow
511
+ df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
512
+ df = df + 9223372036854775807
513
+
514
+ index = Index(
515
+ [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
516
+ )
517
+ expected = DataFrame(
518
+ {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
519
+ index=index,
520
+ dtype=object,
521
+ )
522
+
523
+ expected.index.name = 0
524
+ result = df.groupby(0).sum(numeric_only=False)
525
+ tm.assert_frame_equal(result, expected)
526
+
527
+ # out column is non-numeric, so with numeric_only=True it is dropped
528
+ result2 = df.groupby(0).sum(numeric_only=True)
529
+ expected2 = expected[[]]
530
+ tm.assert_frame_equal(result2, expected2)
531
+
532
+
533
+ @pytest.mark.parametrize(
534
+ "structure, expected",
535
+ [
536
+ (tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
537
+ (list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
538
+ (
539
+ lambda x: tuple(x),
540
+ DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
541
+ ),
542
+ (
543
+ lambda x: list(x),
544
+ DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
545
+ ),
546
+ ],
547
+ )
548
+ def test_agg_structs_dataframe(structure, expected):
549
+ df = DataFrame(
550
+ {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
551
+ )
552
+
553
+ result = df.groupby(["A", "B"]).aggregate(structure)
554
+ expected.index.names = ["A", "B"]
555
+ tm.assert_frame_equal(result, expected)
556
+
557
+
558
+ @pytest.mark.parametrize(
559
+ "structure, expected",
560
+ [
561
+ (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
562
+ (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
563
+ (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
564
+ (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
565
+ ],
566
+ )
567
+ def test_agg_structs_series(structure, expected):
568
+ # Issue #18079
569
+ df = DataFrame(
570
+ {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
571
+ )
572
+
573
+ result = df.groupby("A")["C"].aggregate(structure)
574
+ expected.index.name = "A"
575
+ tm.assert_series_equal(result, expected)
576
+
577
+
578
+ def test_agg_category_nansum(observed):
579
+ categories = ["a", "b", "c"]
580
+ df = DataFrame(
581
+ {"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
582
+ )
583
+ result = df.groupby("A", observed=observed).B.agg(np.nansum)
584
+ expected = Series(
585
+ [3, 3, 0],
586
+ index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
587
+ name="B",
588
+ )
589
+ if observed:
590
+ expected = expected[expected != 0]
591
+ tm.assert_series_equal(result, expected)
592
+
593
+
594
+ def test_agg_list_like_func():
595
+ # GH 18473
596
+ df = DataFrame({"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]})
597
+ grouped = df.groupby("A", as_index=False, sort=False)
598
+ result = grouped.agg({"B": lambda x: list(x)})
599
+ expected = DataFrame(
600
+ {"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
601
+ )
602
+ tm.assert_frame_equal(result, expected)
603
+
604
+
605
+ def test_agg_lambda_with_timezone():
606
+ # GH 23683
607
+ df = DataFrame(
608
+ {
609
+ "tag": [1, 1],
610
+ "date": [
611
+ pd.Timestamp("2018-01-01", tz="UTC"),
612
+ pd.Timestamp("2018-01-02", tz="UTC"),
613
+ ],
614
+ }
615
+ )
616
+ result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
617
+ expected = DataFrame(
618
+ [pd.Timestamp("2018-01-01", tz="UTC")],
619
+ index=Index([1], name="tag"),
620
+ columns=["date"],
621
+ )
622
+ tm.assert_frame_equal(result, expected)
623
+
624
+
625
+ @pytest.mark.parametrize(
626
+ "err_cls",
627
+ [
628
+ NotImplementedError,
629
+ RuntimeError,
630
+ KeyError,
631
+ IndexError,
632
+ OSError,
633
+ ValueError,
634
+ ArithmeticError,
635
+ AttributeError,
636
+ ],
637
+ )
638
+ def test_groupby_agg_err_catching(err_cls):
639
+ # make sure we suppress anything other than TypeError or AssertionError
640
+ # in _python_agg_general
641
+
642
+ # Use a non-standard EA to make sure we don't go down ndarray paths
643
+ from pandas.tests.extension.decimal.array import (
644
+ DecimalArray,
645
+ make_data,
646
+ to_decimal,
647
+ )
648
+
649
+ data = make_data()[:5]
650
+ df = DataFrame(
651
+ {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
652
+ )
653
+
654
+ expected = Series(to_decimal([data[0], data[3]]))
655
+
656
+ def weird_func(x):
657
+ # weird function that raises something other than TypeError or IndexError
658
+ # in _python_agg_general
659
+ if len(x) == 0:
660
+ raise err_cls
661
+ return x.iloc[0]
662
+
663
+ result = df["decimals"].groupby(df["id1"]).agg(weird_func)
664
+ tm.assert_series_equal(result, expected, check_names=False)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/conftest.py ADDED
@@ -0,0 +1,204 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import DataFrame
5
+ import pandas._testing as tm
6
+ from pandas.core.groupby.base import (
7
+ reduction_kernels,
8
+ transformation_kernels,
9
+ )
10
+
11
+
12
+ @pytest.fixture(params=[True, False])
13
+ def sort(request):
14
+ return request.param
15
+
16
+
17
+ @pytest.fixture(params=[True, False])
18
+ def as_index(request):
19
+ return request.param
20
+
21
+
22
+ @pytest.fixture(params=[True, False])
23
+ def dropna(request):
24
+ return request.param
25
+
26
+
27
+ @pytest.fixture(params=[True, False])
28
+ def observed(request):
29
+ return request.param
30
+
31
+
32
+ @pytest.fixture
33
+ def mframe(multiindex_dataframe_random_data):
34
+ return multiindex_dataframe_random_data
35
+
36
+
37
+ @pytest.fixture
38
+ def df():
39
+ return DataFrame(
40
+ {
41
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
42
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
43
+ "C": np.random.randn(8),
44
+ "D": np.random.randn(8),
45
+ }
46
+ )
47
+
48
+
49
+ @pytest.fixture
50
+ def ts():
51
+ return tm.makeTimeSeries()
52
+
53
+
54
+ @pytest.fixture
55
+ def tsd():
56
+ return tm.getTimeSeriesData()
57
+
58
+
59
+ @pytest.fixture
60
+ def tsframe(tsd):
61
+ return DataFrame(tsd)
62
+
63
+
64
+ @pytest.fixture
65
+ def df_mixed_floats():
66
+ return DataFrame(
67
+ {
68
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
69
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
70
+ "C": np.random.randn(8),
71
+ "D": np.array(np.random.randn(8), dtype="float32"),
72
+ }
73
+ )
74
+
75
+
76
+ @pytest.fixture
77
+ def three_group():
78
+ return DataFrame(
79
+ {
80
+ "A": [
81
+ "foo",
82
+ "foo",
83
+ "foo",
84
+ "foo",
85
+ "bar",
86
+ "bar",
87
+ "bar",
88
+ "bar",
89
+ "foo",
90
+ "foo",
91
+ "foo",
92
+ ],
93
+ "B": [
94
+ "one",
95
+ "one",
96
+ "one",
97
+ "two",
98
+ "one",
99
+ "one",
100
+ "one",
101
+ "two",
102
+ "two",
103
+ "two",
104
+ "one",
105
+ ],
106
+ "C": [
107
+ "dull",
108
+ "dull",
109
+ "shiny",
110
+ "dull",
111
+ "dull",
112
+ "shiny",
113
+ "shiny",
114
+ "dull",
115
+ "shiny",
116
+ "shiny",
117
+ "shiny",
118
+ ],
119
+ "D": np.random.randn(11),
120
+ "E": np.random.randn(11),
121
+ "F": np.random.randn(11),
122
+ }
123
+ )
124
+
125
+
126
+ @pytest.fixture()
127
+ def slice_test_df():
128
+ data = [
129
+ [0, "a", "a0_at_0"],
130
+ [1, "b", "b0_at_1"],
131
+ [2, "a", "a1_at_2"],
132
+ [3, "b", "b1_at_3"],
133
+ [4, "c", "c0_at_4"],
134
+ [5, "a", "a2_at_5"],
135
+ [6, "a", "a3_at_6"],
136
+ [7, "a", "a4_at_7"],
137
+ ]
138
+ df = DataFrame(data, columns=["Index", "Group", "Value"])
139
+ return df.set_index("Index")
140
+
141
+
142
+ @pytest.fixture()
143
+ def slice_test_grouped(slice_test_df):
144
+ return slice_test_df.groupby("Group", as_index=False)
145
+
146
+
147
+ @pytest.fixture(params=sorted(reduction_kernels))
148
+ def reduction_func(request):
149
+ """
150
+ yields the string names of all groupby reduction functions, one at a time.
151
+ """
152
+ return request.param
153
+
154
+
155
+ @pytest.fixture(params=sorted(transformation_kernels))
156
+ def transformation_func(request):
157
+ """yields the string names of all groupby transformation functions."""
158
+ return request.param
159
+
160
+
161
+ @pytest.fixture(params=sorted(reduction_kernels) + sorted(transformation_kernels))
162
+ def groupby_func(request):
163
+ """yields both aggregation and transformation functions."""
164
+ return request.param
165
+
166
+
167
+ @pytest.fixture(params=[True, False])
168
+ def parallel(request):
169
+ """parallel keyword argument for numba.jit"""
170
+ return request.param
171
+
172
+
173
+ # Can parameterize nogil & nopython over True | False, but limiting per
174
+ # https://github.com/pandas-dev/pandas/pull/41971#issuecomment-860607472
175
+
176
+
177
+ @pytest.fixture(params=[False])
178
+ def nogil(request):
179
+ """nogil keyword argument for numba.jit"""
180
+ return request.param
181
+
182
+
183
+ @pytest.fixture(params=[True])
184
+ def nopython(request):
185
+ """nopython keyword argument for numba.jit"""
186
+ return request.param
187
+
188
+
189
+ @pytest.fixture(
190
+ params=[
191
+ ("mean", {}),
192
+ ("var", {"ddof": 1}),
193
+ ("var", {"ddof": 0}),
194
+ ("std", {"ddof": 1}),
195
+ ("std", {"ddof": 0}),
196
+ ("sum", {}),
197
+ ("min", {}),
198
+ ("max", {}),
199
+ ],
200
+ ids=["mean", "var_1", "var_0", "std_1", "std_0", "sum", "min", "max"],
201
+ )
202
+ def numba_supported_reductions(request):
203
+ """reductions supported with engine='numba'"""
204
+ return request.param
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_allowlist.py ADDED
@@ -0,0 +1,326 @@
1
+ """
2
+ Test methods relating to generic function evaluation,
3
+ i.e. the so-called white/black lists.
4
+ """
5
+
6
+ from string import ascii_lowercase
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas import (
12
+ DataFrame,
13
+ Series,
14
+ date_range,
15
+ )
16
+ import pandas._testing as tm
17
+ from pandas.core.groupby.base import (
18
+ groupby_other_methods,
19
+ reduction_kernels,
20
+ transformation_kernels,
21
+ )
22
+
23
+ AGG_FUNCTIONS = [
24
+ "sum",
25
+ "prod",
26
+ "min",
27
+ "max",
28
+ "median",
29
+ "mean",
30
+ "skew",
31
+ "std",
32
+ "var",
33
+ "sem",
34
+ ]
35
+ AGG_FUNCTIONS_WITH_SKIPNA = ["skew"]
36
+
37
+
38
+ @pytest.fixture
39
+ def df():
40
+ return DataFrame(
41
+ {
42
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
43
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
44
+ "C": np.random.randn(8),
45
+ "D": np.random.randn(8),
46
+ }
47
+ )
48
+
49
+
50
+ @pytest.fixture
51
+ def df_letters():
52
+ letters = np.array(list(ascii_lowercase))
53
+ N = 10
54
+ random_letters = letters.take(np.random.randint(0, 26, N))
55
+ df = DataFrame(
56
+ {
57
+ "floats": N / 10 * Series(np.random.random(N)),
58
+ "letters": Series(random_letters),
59
+ }
60
+ )
61
+ return df
62
+
63
+
64
+ @pytest.fixture
65
+ def raw_frame():
66
+ return DataFrame([0])
67
+
68
+
69
+ @pytest.mark.parametrize("op", AGG_FUNCTIONS)
70
+ @pytest.mark.parametrize("axis", [0, 1])
71
+ @pytest.mark.parametrize("skipna", [True, False])
72
+ @pytest.mark.parametrize("sort", [True, False])
73
+ def test_regression_allowlist_methods(raw_frame, op, axis, skipna, sort):
74
+ # GH6944
75
+ # GH 17537
76
+ # explicitly test the allowlist methods
77
+ if axis == 0:
78
+ frame = raw_frame
79
+ else:
80
+ frame = raw_frame.T
81
+
82
+ if op in AGG_FUNCTIONS_WITH_SKIPNA:
83
+ grouped = frame.groupby(level=0, axis=axis, sort=sort)
84
+ result = getattr(grouped, op)(skipna=skipna)
85
+ expected = frame.groupby(level=0).apply(
86
+ lambda h: getattr(h, op)(axis=axis, skipna=skipna)
87
+ )
88
+ if sort:
89
+ expected = expected.sort_index(axis=axis)
90
+ tm.assert_frame_equal(result, expected)
91
+ else:
92
+ grouped = frame.groupby(level=0, axis=axis, sort=sort)
93
+ result = getattr(grouped, op)()
94
+ expected = frame.groupby(level=0).apply(lambda h: getattr(h, op)(axis=axis))
95
+ if sort:
96
+ expected = expected.sort_index(axis=axis)
97
+ tm.assert_frame_equal(result, expected)
98
+
99
+
100
+ def test_groupby_blocklist(df_letters):
101
+ df = df_letters
102
+ s = df_letters.floats
103
+
104
+ blocklist = [
105
+ "eval",
106
+ "query",
107
+ "abs",
108
+ "where",
109
+ "mask",
110
+ "align",
111
+ "groupby",
112
+ "clip",
113
+ "astype",
114
+ "at",
115
+ "combine",
116
+ "consolidate",
117
+ "convert_objects",
118
+ ]
119
+ to_methods = [method for method in dir(df) if method.startswith("to_")]
120
+
121
+ blocklist.extend(to_methods)
122
+
123
+ for bl in blocklist:
124
+ for obj in (df, s):
125
+ gb = obj.groupby(df.letters)
126
+
127
+ # e.g., to_csv
128
+ defined_but_not_allowed = (
129
+ f"(?:^Cannot.+{repr(bl)}.+'{type(gb).__name__}'.+try "
130
+ f"using the 'apply' method$)"
131
+ )
132
+
133
+ # e.g., query, eval
134
+ not_defined = (
135
+ f"(?:^'{type(gb).__name__}' object has no attribute {repr(bl)}$)"
136
+ )
137
+
138
+ msg = f"{defined_but_not_allowed}|{not_defined}"
139
+
140
+ with pytest.raises(AttributeError, match=msg):
141
+ getattr(gb, bl)
142
+
143
+
144
+ def test_tab_completion(mframe):
145
+ grp = mframe.groupby(level="second")
146
+ results = {v for v in dir(grp) if not v.startswith("_")}
147
+ expected = {
148
+ "A",
149
+ "B",
150
+ "C",
151
+ "agg",
152
+ "aggregate",
153
+ "apply",
154
+ "boxplot",
155
+ "filter",
156
+ "first",
157
+ "get_group",
158
+ "groups",
159
+ "hist",
160
+ "indices",
161
+ "last",
162
+ "max",
163
+ "mean",
164
+ "median",
165
+ "min",
166
+ "ngroups",
167
+ "nth",
168
+ "ohlc",
169
+ "plot",
170
+ "prod",
171
+ "size",
172
+ "std",
173
+ "sum",
174
+ "transform",
175
+ "var",
176
+ "sem",
177
+ "count",
178
+ "nunique",
179
+ "head",
180
+ "describe",
181
+ "cummax",
182
+ "quantile",
183
+ "rank",
184
+ "cumprod",
185
+ "tail",
186
+ "resample",
187
+ "cummin",
188
+ "fillna",
189
+ "cumsum",
190
+ "cumcount",
191
+ "ngroup",
192
+ "all",
193
+ "shift",
194
+ "skew",
195
+ "take",
196
+ "pct_change",
197
+ "any",
198
+ "corr",
199
+ "corrwith",
200
+ "cov",
201
+ "dtypes",
202
+ "ndim",
203
+ "diff",
204
+ "idxmax",
205
+ "idxmin",
206
+ "ffill",
207
+ "bfill",
208
+ "rolling",
209
+ "expanding",
210
+ "pipe",
211
+ "sample",
212
+ "ewm",
213
+ "value_counts",
214
+ }
215
+ assert results == expected
216
+
217
+
218
+ def test_groupby_function_rename(mframe):
219
+ grp = mframe.groupby(level="second")
220
+ for name in ["sum", "prod", "min", "max", "first", "last"]:
221
+ f = getattr(grp, name)
222
+ assert f.__name__ == name
223
+
224
+
225
+ @pytest.mark.parametrize(
226
+ "method",
227
+ [
228
+ "count",
229
+ "corr",
230
+ "cummax",
231
+ "cummin",
232
+ "cumprod",
233
+ "describe",
234
+ "rank",
235
+ "quantile",
236
+ "diff",
237
+ "shift",
238
+ "all",
239
+ "any",
240
+ "idxmin",
241
+ "idxmax",
242
+ "ffill",
243
+ "bfill",
244
+ "pct_change",
245
+ ],
246
+ )
247
+ def test_groupby_selection_with_methods(df, method):
248
+ # some methods which require DatetimeIndex
249
+ rng = date_range("2014", periods=len(df))
250
+ df.index = rng
251
+
252
+ g = df.groupby(["A"])[["C"]]
253
+ g_exp = df[["C"]].groupby(df["A"])
254
+ # TODO check groupby with > 1 col ?
255
+
256
+ res = getattr(g, method)()
257
+ exp = getattr(g_exp, method)()
258
+
259
+ # should always be frames!
260
+ tm.assert_frame_equal(res, exp)
261
+
262
+
263
+ def test_groupby_selection_other_methods(df):
264
+ # some methods which require DatetimeIndex
265
+ rng = date_range("2014", periods=len(df))
266
+ df.columns.name = "foo"
267
+ df.index = rng
268
+
269
+ g = df.groupby(["A"])[["C"]]
270
+ g_exp = df[["C"]].groupby(df["A"])
271
+
272
+ # methods which aren't just .foo()
273
+ tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0))
274
+ tm.assert_frame_equal(g.dtypes, g_exp.dtypes)
275
+ tm.assert_frame_equal(g.apply(lambda x: x.sum()), g_exp.apply(lambda x: x.sum()))
276
+
277
+ tm.assert_frame_equal(g.resample("D").mean(), g_exp.resample("D").mean())
278
+ tm.assert_frame_equal(g.resample("D").ohlc(), g_exp.resample("D").ohlc())
279
+
280
+ tm.assert_frame_equal(
281
+ g.filter(lambda x: len(x) == 3), g_exp.filter(lambda x: len(x) == 3)
282
+ )
283
+
284
+
285
+ def test_all_methods_categorized(mframe):
286
+ grp = mframe.groupby(mframe.iloc[:, 0])
287
+ names = {_ for _ in dir(grp) if not _.startswith("_")} - set(mframe.columns)
288
+ new_names = set(names)
289
+ new_names -= reduction_kernels
290
+ new_names -= transformation_kernels
291
+ new_names -= groupby_other_methods
292
+
293
+ assert not reduction_kernels & transformation_kernels
294
+ assert not reduction_kernels & groupby_other_methods
295
+ assert not transformation_kernels & groupby_other_methods
296
+
297
+ # new public method?
298
+ if new_names:
299
+ msg = f"""
300
+ There are uncategorized methods defined on the Grouper class:
301
+ {new_names}.
302
+
303
+ Was a new method recently added?
304
+
305
+ Every public method on Grouper must appear in exactly one of the
306
+ following three lists defined in pandas.core.groupby.base:
307
+ - `reduction_kernels`
308
+ - `transformation_kernels`
309
+ - `groupby_other_methods`
310
+ see the comments in pandas/core/groupby/base.py for guidance on
311
+ how to fix this test.
312
+ """
313
+ raise AssertionError(msg)
314
+
315
+ # removed a public method?
316
+ all_categorized = reduction_kernels | transformation_kernels | groupby_other_methods
317
+ if names != all_categorized:
318
+ msg = f"""
319
+ Some methods which are supposed to be on the Grouper class
320
+ are missing:
321
+ {all_categorized - names}.
322
+
323
+ They're still defined in one of the lists that live in pandas/core/groupby/base.py.
324
+ If you removed a method, you should update those lists as well.
325
+ """
326
+ raise AssertionError(msg)
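To make the allow/block split above concrete, a small illustration (not part of the commit): a blocklisted DataFrame method such as `query` is simply not defined on the groupby object, and the supported route is to run it per group via `apply`.

import pandas as pd

df = pd.DataFrame({"letters": ["a", "a", "b"], "x": [1, 2, 3]})
gb = df.groupby("letters")
try:
    gb.query  # blocklisted: accessing it raises AttributeError
except AttributeError as err:
    print(err)
# the sanctioned alternative: apply the method per group
print(gb.apply(lambda g: g.query("x > 1")))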
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_any_all.py ADDED
@@ -0,0 +1,189 @@
1
+ import builtins
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ Index,
10
+ Series,
11
+ isna,
12
+ )
13
+ import pandas._testing as tm
14
+
15
+
16
+ @pytest.mark.parametrize("agg_func", ["any", "all"])
17
+ @pytest.mark.parametrize("skipna", [True, False])
18
+ @pytest.mark.parametrize(
19
+ "vals",
20
+ [
21
+ ["foo", "bar", "baz"],
22
+ ["foo", "", ""],
23
+ ["", "", ""],
24
+ [1, 2, 3],
25
+ [1, 0, 0],
26
+ [0, 0, 0],
27
+ [1.0, 2.0, 3.0],
28
+ [1.0, 0.0, 0.0],
29
+ [0.0, 0.0, 0.0],
30
+ [True, True, True],
31
+ [True, False, False],
32
+ [False, False, False],
33
+ [np.nan, np.nan, np.nan],
34
+ ],
35
+ )
36
+ def test_groupby_bool_aggs(agg_func, skipna, vals):
37
+ df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
38
+
39
+ # Figure out expectation using Python builtin
40
+ exp = getattr(builtins, agg_func)(vals)
41
+
42
+ # edge case for missing data with skipna and 'any'
43
+ if skipna and all(isna(vals)) and agg_func == "any":
44
+ exp = False
45
+
46
+ exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
47
+ result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
48
+ tm.assert_frame_equal(result, exp_df)
49
+
50
+
51
+ def test_any():
52
+ df = DataFrame(
53
+ [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
54
+ columns=["A", "B", "C"],
55
+ )
56
+ expected = DataFrame(
57
+ [[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
58
+ )
59
+ expected.index.name = "A"
60
+ result = df.groupby("A").any()
61
+ tm.assert_frame_equal(result, expected)
62
+
63
+
64
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
65
+ def test_bool_aggs_dup_column_labels(bool_agg_func):
66
+ # 21668
67
+ df = DataFrame([[True, True]], columns=["a", "a"])
68
+ grp_by = df.groupby([0])
69
+ result = getattr(grp_by, bool_agg_func)()
70
+
71
+ expected = df.set_axis(np.array([0]))
72
+ tm.assert_frame_equal(result, expected)
73
+
74
+
75
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
76
+ @pytest.mark.parametrize("skipna", [True, False])
77
+ @pytest.mark.parametrize(
78
+ "data",
79
+ [
80
+ [False, False, False],
81
+ [True, True, True],
82
+ [pd.NA, pd.NA, pd.NA],
83
+ [False, pd.NA, False],
84
+ [True, pd.NA, True],
85
+ [True, pd.NA, False],
86
+ ],
87
+ )
88
+ def test_masked_kleene_logic(bool_agg_func, skipna, data):
89
+ # GH#37506
90
+ ser = Series(data, dtype="boolean")
91
+
92
+ # The result should match aggregating on the whole series. Correctness
93
+ # there is verified in test_reductions.py::test_any_all_boolean_kleene_logic
94
+ expected_data = getattr(ser, bool_agg_func)(skipna=skipna)
95
+ expected = Series(expected_data, index=np.array([0]), dtype="boolean")
96
+
97
+ result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna)
98
+ tm.assert_series_equal(result, expected)
99
+
100
+
101
+ @pytest.mark.parametrize(
102
+ "dtype1,dtype2,exp_col1,exp_col2",
103
+ [
104
+ (
105
+ "float",
106
+ "Float64",
107
+ np.array([True], dtype=bool),
108
+ pd.array([pd.NA], dtype="boolean"),
109
+ ),
110
+ (
111
+ "Int64",
112
+ "float",
113
+ pd.array([pd.NA], dtype="boolean"),
114
+ np.array([True], dtype=bool),
115
+ ),
116
+ (
117
+ "Int64",
118
+ "Int64",
119
+ pd.array([pd.NA], dtype="boolean"),
120
+ pd.array([pd.NA], dtype="boolean"),
121
+ ),
122
+ (
123
+ "Float64",
124
+ "boolean",
125
+ pd.array([pd.NA], dtype="boolean"),
126
+ pd.array([pd.NA], dtype="boolean"),
127
+ ),
128
+ ],
129
+ )
130
+ def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2):
131
+ # GH#37506
132
+ data = [1.0, np.nan]
133
+ df = DataFrame(
134
+ {"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)}
135
+ )
136
+ result = df.groupby([1, 1]).agg("all", skipna=False)
137
+
138
+ expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=np.array([1]))
139
+ tm.assert_frame_equal(result, expected)
140
+
141
+
142
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
143
+ @pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
144
+ @pytest.mark.parametrize("skipna", [True, False])
145
+ def test_masked_bool_aggs_skipna(bool_agg_func, dtype, skipna, frame_or_series):
146
+ # GH#40585
147
+ obj = frame_or_series([pd.NA, 1], dtype=dtype)
148
+ expected_res = True
149
+ if not skipna and bool_agg_func == "all":
150
+ expected_res = pd.NA
151
+ expected = frame_or_series([expected_res], index=np.array([1]), dtype="boolean")
152
+
153
+ result = obj.groupby([1, 1]).agg(bool_agg_func, skipna=skipna)
154
+ tm.assert_equal(result, expected)
155
+
156
+
157
+ @pytest.mark.parametrize(
158
+ "bool_agg_func,data,expected_res",
159
+ [
160
+ ("any", [pd.NA, np.nan], False),
161
+ ("any", [pd.NA, 1, np.nan], True),
162
+ ("all", [pd.NA, pd.NaT], True),
163
+ ("all", [pd.NA, False, pd.NaT], False),
164
+ ],
165
+ )
166
+ def test_object_type_missing_vals(bool_agg_func, data, expected_res, frame_or_series):
167
+ # GH#37501
168
+ obj = frame_or_series(data, dtype=object)
169
+ result = obj.groupby([1] * len(data)).agg(bool_agg_func)
170
+ expected = frame_or_series([expected_res], index=np.array([1]), dtype="bool")
171
+ tm.assert_equal(result, expected)
172
+
173
+
174
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
175
+ def test_object_NA_raises_with_skipna_false(bool_agg_func):
176
+ # GH#37501
177
+ ser = Series([pd.NA], dtype=object)
178
+ with pytest.raises(TypeError, match="boolean value of NA is ambiguous"):
179
+ ser.groupby([1]).agg(bool_agg_func, skipna=False)
180
+
181
+
182
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
183
+ def test_empty(frame_or_series, bool_agg_func):
184
+ # GH 45231
185
+ kwargs = {"columns": ["a"]} if frame_or_series is DataFrame else {"name": "a"}
186
+ obj = frame_or_series(**kwargs, dtype=object)
187
+ result = getattr(obj.groupby(obj.index), bool_agg_func)()
188
+ expected = frame_or_series(**kwargs, dtype=bool)
189
+ tm.assert_equal(result, expected)
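A compact demonstration (illustrative, not part of the commit) of the Kleene logic that `test_masked_kleene_logic` pins down: with the nullable `boolean` dtype and `skipna=False`, `pd.NA` propagates only when it could change the answer.

import pandas as pd

ser = pd.Series([True, pd.NA, False], dtype="boolean")
gb = ser.groupby([0, 0, 0])
print(gb.any(skipna=False).iloc[0])  # True: the True already decides any()
print(gb.all(skipna=False).iloc[0])  # False: the False already decides all()
# when NA could flip the outcome, it survives:
ser2 = pd.Series([True, pd.NA], dtype="boolean")
print(ser2.groupby([0, 0]).all(skipna=False).iloc[0])  # <NA>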
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_api_consistency.py ADDED
@@ -0,0 +1,142 @@
1
+ """
2
+ Test the consistency of the groupby API, both internally and with other pandas objects.
3
+ """
4
+
5
+ import inspect
6
+
7
+ import pytest
8
+
9
+ from pandas import (
10
+ DataFrame,
11
+ Series,
12
+ )
13
+ from pandas.core.groupby.generic import (
14
+ DataFrameGroupBy,
15
+ SeriesGroupBy,
16
+ )
17
+
18
+
19
+ def test_frame_consistency(request, groupby_func):
20
+ # GH#48028
21
+ if groupby_func in ("first", "last"):
22
+ msg = "first and last are entirely different between frame and groupby"
23
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
24
+ if groupby_func in ("cumcount",):
25
+ msg = "DataFrame has no such method"
26
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
27
+
28
+ if groupby_func == "ngroup":
29
+ assert not hasattr(DataFrame, groupby_func)
30
+ return
31
+
32
+ frame_method = getattr(DataFrame, groupby_func)
33
+ gb_method = getattr(DataFrameGroupBy, groupby_func)
34
+ result = set(inspect.signature(gb_method).parameters)
35
+ if groupby_func == "size":
36
+ # "size" is a method on GroupBy but property on DataFrame:
37
+ expected = {"self"}
38
+ else:
39
+ expected = set(inspect.signature(frame_method).parameters)
40
+
41
+ # Exclude certain arguments from result and expected depending on the operation
42
+ # Some of these may be purposeful inconsistencies between the APIs
43
+ exclude_expected, exclude_result = set(), set()
44
+ if groupby_func in ("any", "all"):
45
+ exclude_expected = {"kwargs", "bool_only", "axis"}
46
+ elif groupby_func in ("count",):
47
+ exclude_expected = {"numeric_only", "axis"}
48
+ elif groupby_func in ("nunique",):
49
+ exclude_expected = {"axis"}
50
+ elif groupby_func in ("max", "min"):
51
+ exclude_expected = {"axis", "kwargs", "skipna"}
52
+ exclude_result = {"min_count", "engine", "engine_kwargs"}
53
+ elif groupby_func in ("mean", "std", "sum", "var"):
54
+ exclude_expected = {"axis", "kwargs", "skipna"}
55
+ exclude_result = {"engine", "engine_kwargs"}
56
+ elif groupby_func in ("median", "prod", "sem"):
57
+ exclude_expected = {"axis", "kwargs", "skipna"}
58
+ elif groupby_func in ("backfill", "bfill", "ffill", "pad"):
59
+ exclude_expected = {"downcast", "inplace", "axis"}
60
+ elif groupby_func in ("cummax", "cummin"):
61
+ exclude_expected = {"skipna", "args"}
62
+ exclude_result = {"numeric_only"}
63
+ elif groupby_func in ("cumprod", "cumsum"):
64
+ exclude_expected = {"skipna"}
65
+ elif groupby_func in ("pct_change",):
66
+ exclude_expected = {"kwargs"}
67
+ exclude_result = {"axis"}
68
+ elif groupby_func in ("rank",):
69
+ exclude_expected = {"numeric_only"}
70
+ elif groupby_func in ("quantile",):
71
+ exclude_expected = {"method", "axis"}
72
+
73
+ # Ensure excluded arguments are actually in the signatures
74
+ assert result & exclude_result == exclude_result
75
+ assert expected & exclude_expected == exclude_expected
76
+
77
+ result -= exclude_result
78
+ expected -= exclude_expected
79
+ assert result == expected
80
+
81
+
82
+ def test_series_consistency(request, groupby_func):
83
+ # GH#48028
84
+ if groupby_func in ("first", "last"):
85
+ msg = "first and last are entirely different between Series and groupby"
86
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
87
+ if groupby_func in ("cumcount", "corrwith"):
88
+ msg = "Series has no such method"
89
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
90
+
91
+ if groupby_func == "ngroup":
92
+ assert not hasattr(Series, groupby_func)
93
+ return
94
+
95
+ series_method = getattr(Series, groupby_func)
96
+ gb_method = getattr(SeriesGroupBy, groupby_func)
97
+ result = set(inspect.signature(gb_method).parameters)
98
+ if groupby_func == "size":
99
+ # "size" is a method on GroupBy but property on Series
100
+ expected = {"self"}
101
+ else:
102
+ expected = set(inspect.signature(series_method).parameters)
103
+
104
+ # Exclude certain arguments from result and expected depending on the operation
105
+ # Some of these may be purposeful inconsistencies between the APIs
106
+ exclude_expected, exclude_result = set(), set()
107
+ if groupby_func in ("any", "all"):
108
+ exclude_expected = {"kwargs", "bool_only", "axis"}
109
+ elif groupby_func in ("diff",):
110
+ exclude_result = {"axis"}
111
+ elif groupby_func in ("max", "min"):
112
+ exclude_expected = {"axis", "kwargs", "skipna"}
113
+ exclude_result = {"min_count", "engine", "engine_kwargs"}
114
+ elif groupby_func in ("mean", "std", "sum", "var"):
115
+ exclude_expected = {"axis", "kwargs", "skipna"}
116
+ exclude_result = {"engine", "engine_kwargs"}
117
+ elif groupby_func in ("median", "prod", "sem"):
118
+ exclude_expected = {"axis", "kwargs", "skipna"}
119
+ elif groupby_func in ("backfill", "bfill", "ffill", "pad"):
120
+ exclude_expected = {"downcast", "inplace", "axis"}
121
+ elif groupby_func in ("cummax", "cummin"):
122
+ exclude_expected = {"skipna", "args"}
123
+ exclude_result = {"numeric_only"}
124
+ elif groupby_func in ("cumprod", "cumsum"):
125
+ exclude_expected = {"skipna"}
126
+ elif groupby_func in ("pct_change",):
127
+ exclude_expected = {"kwargs"}
128
+ exclude_result = {"axis"}
129
+ elif groupby_func in ("rank",):
130
+ exclude_expected = {"numeric_only"}
131
+ elif groupby_func in ("idxmin", "idxmax"):
132
+ exclude_expected = {"args", "kwargs"}
133
+ elif groupby_func in ("quantile",):
134
+ exclude_result = {"numeric_only"}
135
+
136
+ # Ensure excluded arguments are actually in the signatures
137
+ assert result & exclude_result == exclude_result
138
+ assert expected & exclude_expected == exclude_expected
139
+
140
+ result -= exclude_result
141
+ expected -= exclude_expected
142
+ assert result == expected
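The comparison technique both tests rely on, reduced to its core (a sketch; the exact set differences vary with the pandas version): collect each method's parameter names with `inspect.signature` and diff them as sets.

import inspect

from pandas import DataFrame
from pandas.core.groupby.generic import DataFrameGroupBy

frame_params = set(inspect.signature(DataFrame.sum).parameters)
gb_params = set(inspect.signature(DataFrameGroupBy.sum).parameters)
print(gb_params - frame_params)  # groupby-only knobs, e.g. engine/engine_kwargs
print(frame_params - gb_params)  # frame-only knobs, e.g. axis/skipna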
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_apply.py ADDED
@@ -0,0 +1,1341 @@
1
+ from datetime import (
2
+ date,
3
+ datetime,
4
+ )
5
+ from io import StringIO
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ import pandas as pd
11
+ from pandas import (
12
+ DataFrame,
13
+ Index,
14
+ MultiIndex,
15
+ Series,
16
+ bdate_range,
17
+ )
18
+ import pandas._testing as tm
19
+ from pandas.tests.groupby import get_groupby_method_args
20
+
21
+
22
+ def test_apply_issues():
23
+ # GH 5788
24
+
25
+ s = """2011.05.16,00:00,1.40893
26
+ 2011.05.16,01:00,1.40760
27
+ 2011.05.16,02:00,1.40750
28
+ 2011.05.16,03:00,1.40649
29
+ 2011.05.17,02:00,1.40893
30
+ 2011.05.17,03:00,1.40760
31
+ 2011.05.17,04:00,1.40750
32
+ 2011.05.17,05:00,1.40649
33
+ 2011.05.18,02:00,1.40893
34
+ 2011.05.18,03:00,1.40760
35
+ 2011.05.18,04:00,1.40750
36
+ 2011.05.18,05:00,1.40649"""
37
+
38
+ df = pd.read_csv(
39
+ StringIO(s),
40
+ header=None,
41
+ names=["date", "time", "value"],
42
+ parse_dates=[["date", "time"]],
43
+ )
44
+ df = df.set_index("date_time")
45
+
46
+ expected = df.groupby(df.index.date).idxmax()
47
+ result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
48
+ tm.assert_frame_equal(result, expected)
49
+
50
+ # GH 5789
51
+ # don't auto coerce dates
52
+ df = pd.read_csv(StringIO(s), header=None, names=["date", "time", "value"])
53
+ exp_idx = Index(
54
+ ["2011.05.16", "2011.05.17", "2011.05.18"], dtype=object, name="date"
55
+ )
56
+ expected = Series(["00:00", "02:00", "02:00"], index=exp_idx)
57
+ result = df.groupby("date", group_keys=False).apply(
58
+ lambda x: x["time"][x["value"].idxmax()]
59
+ )
60
+ tm.assert_series_equal(result, expected)
61
+
62
+
63
+ def test_apply_trivial():
64
+ # GH 20066
65
+ # trivial apply: ignore input and return a constant dataframe.
66
+ df = DataFrame(
67
+ {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
68
+ columns=["key", "data"],
69
+ )
70
+ expected = pd.concat([df.iloc[1:], df.iloc[1:]], axis=1, keys=["float64", "object"])
71
+ result = df.groupby([str(x) for x in df.dtypes], axis=1).apply(
72
+ lambda x: df.iloc[1:]
73
+ )
74
+
75
+ tm.assert_frame_equal(result, expected)
76
+
77
+
78
+ def test_apply_trivial_fail():
79
+ # GH 20066
80
+ df = DataFrame(
81
+ {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
82
+ columns=["key", "data"],
83
+ )
84
+ expected = pd.concat([df, df], axis=1, keys=["float64", "object"])
85
+ result = df.groupby([str(x) for x in df.dtypes], axis=1, group_keys=True).apply(
86
+ lambda x: df
87
+ )
88
+
89
+ tm.assert_frame_equal(result, expected)
90
+
91
+
92
+ @pytest.mark.parametrize(
93
+ "df, group_names",
94
+ [
95
+ (DataFrame({"a": [1, 1, 1, 2, 3], "b": ["a", "a", "a", "b", "c"]}), [1, 2, 3]),
96
+ (DataFrame({"a": [0, 0, 1, 1], "b": [0, 1, 0, 1]}), [0, 1]),
97
+ (DataFrame({"a": [1]}), [1]),
98
+ (DataFrame({"a": [1, 1, 1, 2, 2, 1, 1, 2], "b": range(8)}), [1, 2]),
99
+ (DataFrame({"a": [1, 2, 3, 1, 2, 3], "two": [4, 5, 6, 7, 8, 9]}), [1, 2, 3]),
100
+ (
101
+ DataFrame(
102
+ {
103
+ "a": list("aaabbbcccc"),
104
+ "B": [3, 4, 3, 6, 5, 2, 1, 9, 5, 4],
105
+ "C": [4, 0, 2, 2, 2, 7, 8, 6, 2, 8],
106
+ }
107
+ ),
108
+ ["a", "b", "c"],
109
+ ),
110
+ (DataFrame([[1, 2, 3], [2, 2, 3]], columns=["a", "b", "c"]), [1, 2]),
111
+ ],
112
+ ids=[
113
+ "GH2936",
114
+ "GH7739 & GH10519",
115
+ "GH10519",
116
+ "GH2656",
117
+ "GH12155",
118
+ "GH20084",
119
+ "GH21417",
120
+ ],
121
+ )
122
+ def test_group_apply_once_per_group(df, group_names):
123
+ # GH2936, GH7739, GH10519, GH2656, GH12155, GH20084, GH21417
124
+
125
+ # This test should ensure that a function is only evaluated
126
+ # once per group. Previously the function was evaluated twice
127
+ # on the first group to check if the Cython index slider is safe to use
128
+ # This test ensures that the side effect (append to list) is only triggered
129
+ # once per group
130
+
131
+ names = []
132
+ # cannot parameterize over the functions since they need external
133
+ # `names` to detect side effects
134
+
135
+ def f_copy(group):
136
+ # this takes the fast apply path
137
+ names.append(group.name)
138
+ return group.copy()
139
+
140
+ def f_nocopy(group):
141
+ # this takes the slow apply path
142
+ names.append(group.name)
143
+ return group
144
+
145
+ def f_scalar(group):
146
+ # GH7739, GH2656
147
+ names.append(group.name)
148
+ return 0
149
+
150
+ def f_none(group):
151
+ # GH10519, GH12155, GH21417
152
+ names.append(group.name)
153
+
154
+ def f_constant_df(group):
155
+ # GH2936, GH20084
156
+ names.append(group.name)
157
+ return DataFrame({"a": [1], "b": [1]})
158
+
159
+ for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]:
160
+ del names[:]
161
+
162
+ df.groupby("a", group_keys=False).apply(func)
163
+ assert names == group_names
164
+
165
+
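The once-per-group guarantee that this test (and the capsys variant just below) protects can be seen directly with a side-effecting UDF (a sketch, assuming a recent pandas):

import pandas as pd

calls = []
df = pd.DataFrame({"a": [1, 1, 2], "b": [10, 20, 30]})
df.groupby("a", group_keys=False).apply(lambda g: calls.append(g.name))
print(calls)  # [1, 2] -- one invocation per group, never [1, 1, 2]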
166
+ def test_group_apply_once_per_group2(capsys):
167
+ # GH: 31111
168
+ # groupby-apply needs to execute len(set(group_by_columns)) times
169
+
170
+ expected = 2 # Number of times `apply` should call a function for the current test
171
+
172
+ df = DataFrame(
173
+ {
174
+ "group_by_column": [0, 0, 0, 0, 1, 1, 1, 1],
175
+ "test_column": ["0", "2", "4", "6", "8", "10", "12", "14"],
176
+ },
177
+ index=["0", "2", "4", "6", "8", "10", "12", "14"],
178
+ )
179
+
180
+ df.groupby("group_by_column", group_keys=False).apply(
181
+ lambda df: print("function_called")
182
+ )
183
+
184
+ result = capsys.readouterr().out.count("function_called")
185
+ # If `groupby` behaves unexpectedly, this test will break
186
+ assert result == expected
187
+
188
+
189
+ def test_apply_fast_slow_identical():
190
+ # GH 31613
191
+
192
+ df = DataFrame({"A": [0, 0, 1], "b": range(3)})
193
+
194
+ # For simple index structures we check for fast/slow apply using
195
+ # an identity check on in/output
196
+ def slow(group):
197
+ return group
198
+
199
+ def fast(group):
200
+ return group.copy()
201
+
202
+ fast_df = df.groupby("A", group_keys=False).apply(fast)
203
+ slow_df = df.groupby("A", group_keys=False).apply(slow)
204
+
205
+ tm.assert_frame_equal(fast_df, slow_df)
206
+
207
+
208
+ @pytest.mark.parametrize(
209
+ "func",
210
+ [
211
+ lambda x: x,
212
+ lambda x: x[:],
213
+ lambda x: x.copy(deep=False),
214
+ lambda x: x.copy(deep=True),
215
+ ],
216
+ )
217
+ def test_groupby_apply_identity_maybecopy_index_identical(func):
218
+ # GH 14927
219
+ # Whether the function returns a copy of the input data or not should not
220
+ # have an impact on the index structure of the result since this is not
221
+ # transparent to the user
222
+
223
+ df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
224
+
225
+ result = df.groupby("g", group_keys=False).apply(func)
226
+ tm.assert_frame_equal(result, df)
227
+
228
+
229
+ def test_apply_with_mixed_dtype():
230
+ # GH3480, apply with mixed dtype on axis=1 breaks in 0.11
231
+ df = DataFrame(
232
+ {
233
+ "foo1": np.random.randn(6),
234
+ "foo2": ["one", "two", "two", "three", "one", "two"],
235
+ }
236
+ )
237
+ result = df.apply(lambda x: x, axis=1).dtypes
238
+ expected = df.dtypes
239
+ tm.assert_series_equal(result, expected)
240
+
241
+ # GH 3610 incorrect dtype conversion with as_index=False
242
+ df = DataFrame({"c1": [1, 2, 6, 6, 8]})
243
+ df["c2"] = df.c1 / 2.0
244
+ result1 = df.groupby("c2").mean().reset_index().c2
245
+ result2 = df.groupby("c2", as_index=False).mean().c2
246
+ tm.assert_series_equal(result1, result2)
247
+
248
+
249
+ def test_groupby_as_index_apply():
250
+ # GH #4648 and #3417
251
+ df = DataFrame(
252
+ {
253
+ "item_id": ["b", "b", "a", "c", "a", "b"],
254
+ "user_id": [1, 2, 1, 1, 3, 1],
255
+ "time": range(6),
256
+ }
257
+ )
258
+
259
+ g_as = df.groupby("user_id", as_index=True)
260
+ g_not_as = df.groupby("user_id", as_index=False)
261
+
262
+ res_as = g_as.head(2).index
263
+ res_not_as = g_not_as.head(2).index
264
+ exp = Index([0, 1, 2, 4])
265
+ tm.assert_index_equal(res_as, exp)
266
+ tm.assert_index_equal(res_not_as, exp)
267
+
268
+ res_as_apply = g_as.apply(lambda x: x.head(2)).index
269
+ res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
270
+
271
+ # apply doesn't maintain the original ordering
272
+ # changed in GH5610 as the as_index=False returns a MI here
273
+ exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)])
274
+ tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
275
+ exp_as_apply = MultiIndex.from_tuples(tp, names=["user_id", None])
276
+
277
+ tm.assert_index_equal(res_as_apply, exp_as_apply)
278
+ tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
279
+
280
+ ind = Index(list("abcde"))
281
+ df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
282
+ res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index
283
+ tm.assert_index_equal(res, ind)
284
+
285
+
286
+ def test_apply_concat_preserve_names(three_group):
287
+ grouped = three_group.groupby(["A", "B"])
288
+
289
+ def desc(group):
290
+ result = group.describe()
291
+ result.index.name = "stat"
292
+ return result
293
+
294
+ def desc2(group):
295
+ result = group.describe()
296
+ result.index.name = "stat"
297
+ result = result[: len(group)]
298
+ # weirdo
299
+ return result
300
+
301
+ def desc3(group):
302
+ result = group.describe()
303
+
304
+ # names are different
305
+ result.index.name = f"stat_{len(group):d}"
306
+
307
+ result = result[: len(group)]
308
+ # weirdo
309
+ return result
310
+
311
+ result = grouped.apply(desc)
312
+ assert result.index.names == ("A", "B", "stat")
313
+
314
+ result2 = grouped.apply(desc2)
315
+ assert result2.index.names == ("A", "B", "stat")
316
+
317
+ result3 = grouped.apply(desc3)
318
+ assert result3.index.names == ("A", "B", None)
319
+
320
+
321
+ def test_apply_series_to_frame():
322
+ def f(piece):
323
+ with np.errstate(invalid="ignore"):
324
+ logged = np.log(piece)
325
+ return DataFrame(
326
+ {"value": piece, "demeaned": piece - piece.mean(), "logged": logged}
327
+ )
328
+
329
+ dr = bdate_range("1/1/2000", periods=100)
330
+ ts = Series(np.random.randn(100), index=dr)
331
+
332
+ grouped = ts.groupby(lambda x: x.month, group_keys=False)
333
+ result = grouped.apply(f)
334
+
335
+ assert isinstance(result, DataFrame)
336
+ assert not hasattr(result, "name") # GH49907
337
+ tm.assert_index_equal(result.index, ts.index)
338
+
339
+
340
+ def test_apply_series_yield_constant(df):
341
+ result = df.groupby(["A", "B"])["C"].apply(len)
342
+ assert result.index.names[:2] == ("A", "B")
343
+
344
+
345
+ def test_apply_frame_yield_constant(df):
346
+ # GH13568
347
+ result = df.groupby(["A", "B"]).apply(len)
348
+ assert isinstance(result, Series)
349
+ assert result.name is None
350
+
351
+ result = df.groupby(["A", "B"])[["C", "D"]].apply(len)
352
+ assert isinstance(result, Series)
353
+ assert result.name is None
354
+
355
+
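As the assertions above encode, the container that apply returns follows the UDF (a sketch; newer pandas may additionally warn about operating on the grouping columns): a scalar per group yields a Series, a frame per group is concatenated back into a DataFrame.

import pandas as pd

df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]})
print(df.groupby("A").apply(len))                  # Series: one scalar per group
print(df.groupby("A").apply(lambda g: g.head(1)))  # DataFrame: pieces concatenated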
356
+ def test_apply_frame_to_series(df):
357
+ grouped = df.groupby(["A", "B"])
358
+ result = grouped.apply(len)
359
+ expected = grouped.count()["C"]
360
+ tm.assert_index_equal(result.index, expected.index)
361
+ tm.assert_numpy_array_equal(result.values, expected.values)
362
+
363
+
364
+ def test_apply_frame_not_as_index_column_name(df):
365
+ # GH 35964 - path within _wrap_applied_output not hit by a test
366
+ grouped = df.groupby(["A", "B"], as_index=False)
367
+ result = grouped.apply(len)
368
+ expected = grouped.count().rename(columns={"C": np.nan}).drop(columns="D")
369
+ # TODO(GH#34306): Use assert_frame_equal when column name is not np.nan
370
+ tm.assert_index_equal(result.index, expected.index)
371
+ tm.assert_numpy_array_equal(result.values, expected.values)
372
+
373
+
374
+ def test_apply_frame_concat_series():
375
+ def trans(group):
376
+ return group.groupby("B")["C"].sum().sort_values().iloc[:2]
377
+
378
+ def trans2(group):
379
+ grouped = group.groupby(df.reindex(group.index)["B"])
380
+ return grouped.sum().sort_values().iloc[:2]
381
+
382
+ df = DataFrame(
383
+ {
384
+ "A": np.random.randint(0, 5, 1000),
385
+ "B": np.random.randint(0, 5, 1000),
386
+ "C": np.random.randn(1000),
387
+ }
388
+ )
389
+
390
+ result = df.groupby("A").apply(trans)
391
+ exp = df.groupby("A")["C"].apply(trans2)
392
+ tm.assert_series_equal(result, exp, check_names=False)
393
+ assert result.name == "C"
394
+
395
+
396
+ def test_apply_transform(ts):
397
+ grouped = ts.groupby(lambda x: x.month, group_keys=False)
398
+ result = grouped.apply(lambda x: x * 2)
399
+ expected = grouped.transform(lambda x: x * 2)
400
+ tm.assert_series_equal(result, expected)
401
+
402
+
403
+ def test_apply_multikey_corner(tsframe):
404
+ grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
405
+
406
+ def f(group):
407
+ return group.sort_values("A")[-5:]
408
+
409
+ result = grouped.apply(f)
410
+ for key, group in grouped:
411
+ tm.assert_frame_equal(result.loc[key], f(group))
412
+
413
+
414
+ @pytest.mark.parametrize("group_keys", [True, False])
415
+ def test_apply_chunk_view(group_keys):
416
+ # Low-level tinkering could be unsafe; make sure it is not
417
+ df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
418
+
419
+ result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2])
420
+ expected = df.take([0, 1, 3, 4, 6, 7])
421
+ if group_keys:
422
+ expected.index = MultiIndex.from_arrays(
423
+ [[1, 1, 2, 2, 3, 3], expected.index], names=["key", None]
424
+ )
425
+
426
+ tm.assert_frame_equal(result, expected)
427
+
428
+
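The `group_keys` flag parametrized above boils down to whether apply prepends the group labels as an extra index level (illustrative sketch):

import pandas as pd

df = pd.DataFrame({"key": [1, 1, 2], "value": [10, 20, 30]})
kept = df.groupby("key", group_keys=True).apply(lambda x: x.iloc[:2])
dropped = df.groupby("key", group_keys=False).apply(lambda x: x.iloc[:2])
print(kept.index.nlevels)     # 2: MultiIndex of (key, original label)
print(dropped.index.nlevels)  # 1: the original labels only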
429
+ def test_apply_no_name_column_conflict():
430
+ df = DataFrame(
431
+ {
432
+ "name": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
433
+ "name2": [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
434
+ "value": range(9, -1, -1),
435
+ }
436
+ )
437
+
438
+ # it works! #2605
439
+ grouped = df.groupby(["name", "name2"])
440
+ grouped.apply(lambda x: x.sort_values("value", inplace=True))
441
+
442
+
443
+ def test_apply_typecast_fail():
444
+ df = DataFrame(
445
+ {
446
+ "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
447
+ "c": np.tile(["a", "b", "c"], 2),
448
+ "v": np.arange(1.0, 7.0),
449
+ }
450
+ )
451
+
452
+ def f(group):
453
+ v = group["v"]
454
+ group["v2"] = (v - v.min()) / (v.max() - v.min())
455
+ return group
456
+
457
+ result = df.groupby("d", group_keys=False).apply(f)
458
+
459
+ expected = df.copy()
460
+ expected["v2"] = np.tile([0.0, 0.5, 1], 2)
461
+
462
+ tm.assert_frame_equal(result, expected)
463
+
464
+
465
+ def test_apply_multiindex_fail():
466
+ index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
467
+ df = DataFrame(
468
+ {
469
+ "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
470
+ "c": np.tile(["a", "b", "c"], 2),
471
+ "v": np.arange(1.0, 7.0),
472
+ },
473
+ index=index,
474
+ )
475
+
476
+ def f(group):
477
+ v = group["v"]
478
+ group["v2"] = (v - v.min()) / (v.max() - v.min())
479
+ return group
480
+
481
+ result = df.groupby("d", group_keys=False).apply(f)
482
+
483
+ expected = df.copy()
484
+ expected["v2"] = np.tile([0.0, 0.5, 1], 2)
485
+
486
+ tm.assert_frame_equal(result, expected)
487
+
488
+
489
+ def test_apply_corner(tsframe):
490
+ result = tsframe.groupby(lambda x: x.year, group_keys=False).apply(lambda x: x * 2)
491
+ expected = tsframe * 2
492
+ tm.assert_frame_equal(result, expected)
493
+
494
+
495
+ def test_apply_without_copy():
496
+ # GH 5545
497
+ # returning a non-copy in an applied function fails
498
+
499
+ data = DataFrame(
500
+ {
501
+ "id_field": [100, 100, 200, 300],
502
+ "category": ["a", "b", "c", "c"],
503
+ "value": [1, 2, 3, 4],
504
+ }
505
+ )
506
+
507
+ def filt1(x):
508
+ if x.shape[0] == 1:
509
+ return x.copy()
510
+ else:
511
+ return x[x.category == "c"]
512
+
513
+ def filt2(x):
514
+ if x.shape[0] == 1:
515
+ return x
516
+ else:
517
+ return x[x.category == "c"]
518
+
519
+ expected = data.groupby("id_field").apply(filt1)
520
+ result = data.groupby("id_field").apply(filt2)
521
+ tm.assert_frame_equal(result, expected)
522
+
523
+
524
+ @pytest.mark.parametrize("test_series", [True, False])
525
+ def test_apply_with_duplicated_non_sorted_axis(test_series):
526
+ # GH 30667
527
+ df = DataFrame(
528
+ [["x", "p"], ["x", "p"], ["x", "o"]], columns=["X", "Y"], index=[1, 2, 2]
529
+ )
530
+ if test_series:
531
+ ser = df.set_index("Y")["X"]
532
+ result = ser.groupby(level=0, group_keys=False).apply(lambda x: x)
533
+
534
+ # not expecting the order to remain the same for duplicated axis
535
+ result = result.sort_index()
536
+ expected = ser.sort_index()
537
+ tm.assert_series_equal(result, expected)
538
+ else:
539
+ result = df.groupby("Y", group_keys=False).apply(lambda x: x)
540
+
541
+ # not expecting the order to remain the same for duplicated axis
542
+ result = result.sort_values("Y")
543
+ expected = df.sort_values("Y")
544
+ tm.assert_frame_equal(result, expected)
545
+
546
+
547
+ def test_apply_reindex_values():
548
+ # GH: 26209
549
+ # reindexing from a single column of a groupby object with duplicate indices caused
550
+ # a ValueError (cannot reindex from duplicate axis) in 0.24.2; the problem was
551
+ # solved in #30679
552
+ values = [1, 2, 3, 4]
553
+ indices = [1, 1, 2, 2]
554
+ df = DataFrame({"group": ["Group1", "Group2"] * 2, "value": values}, index=indices)
555
+ expected = Series(values, index=indices, name="value")
556
+
557
+ def reindex_helper(x):
558
+ return x.reindex(np.arange(x.index.min(), x.index.max() + 1))
559
+
560
+ # the following group by raised a ValueError
561
+ result = df.groupby("group", group_keys=False).value.apply(reindex_helper)
562
+ tm.assert_series_equal(expected, result)
563
+
564
+
565
+ def test_apply_corner_cases():
566
+ # #535, can't use sliding iterator
567
+
568
+ N = 1000
569
+ labels = np.random.randint(0, 100, size=N)
570
+ df = DataFrame(
571
+ {
572
+ "key": labels,
573
+ "value1": np.random.randn(N),
574
+ "value2": ["foo", "bar", "baz", "qux"] * (N // 4),
575
+ }
576
+ )
577
+
578
+ grouped = df.groupby("key", group_keys=False)
579
+
580
+ def f(g):
581
+ g["value3"] = g["value1"] * 2
582
+ return g
583
+
584
+ result = grouped.apply(f)
585
+ assert "value3" in result
586
+
587
+
588
+ def test_apply_numeric_coercion_when_datetime():
589
+ # In the past, group-by/apply operations have been over-eager
590
+ # in converting dtypes to numeric, in the presence of datetime
591
+ # columns. Various GH issues were filed, the reproductions
592
+ # for which are here.
593
+
594
+ # GH 15670
595
+ df = DataFrame(
596
+ {"Number": [1, 2], "Date": ["2017-03-02"] * 2, "Str": ["foo", "inf"]}
597
+ )
598
+ expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
599
+ df.Date = pd.to_datetime(df.Date)
600
+ result = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
601
+ tm.assert_series_equal(result["Str"], expected["Str"])
602
+
603
+ # GH 15421
604
+ df = DataFrame(
605
+ {"A": [10, 20, 30], "B": ["foo", "3", "4"], "T": [pd.Timestamp("12:31:22")] * 3}
606
+ )
607
+
608
+ def get_B(g):
609
+ return g.iloc[0][["B"]]
610
+
611
+ result = df.groupby("A").apply(get_B)["B"]
612
+ expected = df.B
613
+ expected.index = df.A
614
+ tm.assert_series_equal(result, expected)
615
+
616
+ # GH 14423
617
+ def predictions(tool):
618
+ out = Series(index=["p1", "p2", "useTime"], dtype=object)
619
+ if "step1" in list(tool.State):
620
+ out["p1"] = str(tool[tool.State == "step1"].Machine.values[0])
621
+ if "step2" in list(tool.State):
622
+ out["p2"] = str(tool[tool.State == "step2"].Machine.values[0])
623
+ out["useTime"] = str(tool[tool.State == "step2"].oTime.values[0])
624
+ return out
625
+
626
+ df1 = DataFrame(
627
+ {
628
+ "Key": ["B", "B", "A", "A"],
629
+ "State": ["step1", "step2", "step1", "step2"],
630
+ "oTime": ["", "2016-09-19 05:24:33", "", "2016-09-19 23:59:04"],
631
+ "Machine": ["23", "36L", "36R", "36R"],
632
+ }
633
+ )
634
+ df2 = df1.copy()
635
+ df2.oTime = pd.to_datetime(df2.oTime)
636
+ expected = df1.groupby("Key").apply(predictions).p1
637
+ result = df2.groupby("Key").apply(predictions).p1
638
+ tm.assert_series_equal(expected, result)
639
+
640
+
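The coercion concern above in miniature (a sketch; the behaviour shown assumes a recent pandas): the presence of a datetime column must not make apply coerce an object column, so the string "inf" stays a string.

import pandas as pd

df = pd.DataFrame(
    {"n": [1, 2], "s": ["foo", "inf"], "t": pd.to_datetime(["2017-03-02"] * 2)}
)
out = df.groupby("n").apply(lambda x: x.iloc[0])
print(out["s"].tolist())  # ['foo', 'inf'] -- strings, not float('inf')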
641
+ def test_apply_aggregating_timedelta_and_datetime():
642
+ # Regression test for GH 15562
643
+ # The following groupby caused ValueErrors and IndexErrors pre 0.20.0
644
+
645
+ df = DataFrame(
646
+ {
647
+ "clientid": ["A", "B", "C"],
648
+ "datetime": [np.datetime64("2017-02-01 00:00:00")] * 3,
649
+ }
650
+ )
651
+ df["time_delta_zero"] = df.datetime - df.datetime
652
+ result = df.groupby("clientid").apply(
653
+ lambda ddf: Series(
654
+ {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()}
655
+ )
656
+ )
657
+ expected = DataFrame(
658
+ {
659
+ "clientid": ["A", "B", "C"],
660
+ "clientid_age": [np.timedelta64(0, "D")] * 3,
661
+ "date": [np.datetime64("2017-02-01 00:00:00")] * 3,
662
+ }
663
+ ).set_index("clientid")
664
+
665
+ tm.assert_frame_equal(result, expected)
666
+
667
+
668
+ def test_apply_groupby_datetimeindex():
669
+ # GH 26182
670
+ # groupby apply failed on dataframe with DatetimeIndex
671
+
672
+ data = [["A", 10], ["B", 20], ["B", 30], ["C", 40], ["C", 50]]
673
+ df = DataFrame(
674
+ data, columns=["Name", "Value"], index=pd.date_range("2020-09-01", "2020-09-05")
675
+ )
676
+
677
+ result = df.groupby("Name").sum()
678
+
679
+ expected = DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]})
680
+ expected.set_index("Name", inplace=True)
681
+
682
+ tm.assert_frame_equal(result, expected)
683
+
684
+
685
+ def test_time_field_bug():
686
+ # Test a fix for the following error related to GH issue 11324: when
687
+ # non-key fields in a group-by dataframe contained time-based fields
688
+ # that were not returned by the apply function, an exception would be
689
+ # raised.
690
+
691
+ df = DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]})
692
+
693
+ def func_with_no_date(batch):
694
+ return Series({"c": 2})
695
+
696
+ def func_with_date(batch):
697
+ return Series({"b": datetime(2015, 1, 1), "c": 2})
698
+
699
+ dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date)
700
+ dfg_no_conversion_expected = DataFrame({"c": 2}, index=[1])
701
+ dfg_no_conversion_expected.index.name = "a"
702
+
703
+ dfg_conversion = df.groupby(by=["a"]).apply(func_with_date)
704
+ dfg_conversion_expected = DataFrame({"b": datetime(2015, 1, 1), "c": 2}, index=[1])
705
+ dfg_conversion_expected.index.name = "a"
706
+
707
+ tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
708
+ tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
709
+
710
+
711
+ def test_gb_apply_list_of_unequal_len_arrays():
712
+ # GH1738
713
+ df = DataFrame(
714
+ {
715
+ "group1": ["a", "a", "a", "b", "b", "b", "a", "a", "a", "b", "b", "b"],
716
+ "group2": ["c", "c", "d", "d", "d", "e", "c", "c", "d", "d", "d", "e"],
717
+ "weight": [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
718
+ "value": [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3],
719
+ }
720
+ )
721
+ df = df.set_index(["group1", "group2"])
722
+ df_grouped = df.groupby(level=["group1", "group2"], sort=True)
723
+
724
+ def noddy(value, weight):
725
+ out = np.array(value * weight).repeat(3)
726
+ return out
727
+
728
+ # the kernel function returns arrays of unequal length
729
+ # pandas sniffs the first one, sees it's an array and not
730
+ # a list, and assumes the rest are of equal length
731
+ # and so tries a vstack
732
+
733
+ # don't die
734
+ df_grouped.apply(lambda x: noddy(x.value, x.weight))
735
+
736
+
737
+ def test_groupby_apply_all_none():
738
+ # Tests to make sure there are no errors if the apply function returns all None
739
+ # values. Issue 9684.
740
+ test_df = DataFrame({"groups": [0, 0, 1, 1], "random_vars": [8, 7, 4, 5]})
741
+
742
+ def test_func(x):
743
+ pass
744
+
745
+ result = test_df.groupby("groups").apply(test_func)
746
+ expected = DataFrame()
747
+ tm.assert_frame_equal(result, expected)
748
+
749
+
750
+ def test_groupby_apply_none_first():
751
+ # GH 12824. Tests if apply returns None first.
752
+ test_df1 = DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]})
753
+ test_df2 = DataFrame({"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]})
754
+
755
+ def test_func(x):
756
+ if x.shape[0] < 2:
757
+ return None
758
+ return x.iloc[[0, -1]]
759
+
760
+ result1 = test_df1.groupby("groups").apply(test_func)
761
+ result2 = test_df2.groupby("groups").apply(test_func)
762
+ index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None])
763
+ index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None])
764
+ expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1)
765
+ expected2 = DataFrame({"groups": [2, 2], "vars": [1, 3]}, index=index2)
766
+ tm.assert_frame_equal(result1, expected1)
767
+ tm.assert_frame_equal(result2, expected2)
768
+
769
+
770
+ def test_groupby_apply_return_empty_chunk():
771
+ # GH 22221: apply filter which returns some empty groups
772
+ df = DataFrame({"value": [0, 1], "group": ["filled", "empty"]})
773
+ groups = df.groupby("group")
774
+ result = groups.apply(lambda group: group[group.value != 1]["value"])
775
+ expected = Series(
776
+ [0],
777
+ name="value",
778
+ index=MultiIndex.from_product(
779
+ [["empty", "filled"], [0]], names=["group", None]
780
+ ).drop("empty"),
781
+ )
782
+ tm.assert_series_equal(result, expected)
783
+
784
+
785
+ def test_apply_with_mixed_types():
786
+ # gh-20949
787
+ df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]})
788
+ g = df.groupby("A", group_keys=False)
789
+
790
+ result = g.transform(lambda x: x / x.sum())
791
+ expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]})
792
+ tm.assert_frame_equal(result, expected)
793
+
794
+ result = g.apply(lambda x: x / x.sum())
795
+ tm.assert_frame_equal(result, expected)
796
+
797
+
798
+ def test_func_returns_object():
799
+ # GH 28652
800
+ df = DataFrame({"a": [1, 2]}, index=Index([1, 2]))
801
+ result = df.groupby("a").apply(lambda g: g.index)
802
+ expected = Series([Index([1]), Index([2])], index=Index([1, 2], name="a"))
803
+
804
+ tm.assert_series_equal(result, expected)
805
+
806
+
807
+ @pytest.mark.parametrize(
808
+ "group_column_dtlike",
809
+ [datetime.today(), datetime.today().date(), datetime.today().time()],
810
+ )
811
+ def test_apply_datetime_issue(group_column_dtlike):
812
+ # GH-28247
813
+ # groupby-apply throws an error if one of the columns in the DataFrame
814
+ # is a datetime object and the column labels are different from
815
+ # standard int values in range(len(num_columns))
816
+
817
+ df = DataFrame({"a": ["foo"], "b": [group_column_dtlike]})
818
+ result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42]))
819
+
820
+ expected = DataFrame(
821
+ ["spam"], Index(["foo"], dtype="object", name="a"), columns=[42]
822
+ )
823
+ tm.assert_frame_equal(result, expected)
824
+
825
+
826
+ def test_apply_series_return_dataframe_groups():
827
+ # GH 10078
828
+ tdf = DataFrame(
829
+ {
830
+ "day": {
831
+ 0: pd.Timestamp("2015-02-24 00:00:00"),
832
+ 1: pd.Timestamp("2015-02-24 00:00:00"),
833
+ 2: pd.Timestamp("2015-02-24 00:00:00"),
834
+ 3: pd.Timestamp("2015-02-24 00:00:00"),
835
+ 4: pd.Timestamp("2015-02-24 00:00:00"),
836
+ },
837
+ "userAgent": {
838
+ 0: "some UA string",
839
+ 1: "some UA string",
840
+ 2: "some UA string",
841
+ 3: "another UA string",
842
+ 4: "some UA string",
843
+ },
844
+ "userId": {
845
+ 0: "17661101",
846
+ 1: "17661101",
847
+ 2: "17661101",
848
+ 3: "17661101",
849
+ 4: "17661101",
850
+ },
851
+ }
852
+ )
853
+
854
+ def most_common_values(df):
855
+ return Series({c: s.value_counts().index[0] for c, s in df.items()})
856
+
857
+ result = tdf.groupby("day").apply(most_common_values)["userId"]
858
+ expected = Series(
859
+ ["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId"
860
+ )
861
+ tm.assert_series_equal(result, expected)
862
+
863
+
864
+ @pytest.mark.parametrize("category", [False, True])
865
+ def test_apply_multi_level_name(category):
866
+ # https://github.com/pandas-dev/pandas/issues/31068
867
+ b = [1, 2] * 5
868
+ if category:
869
+ b = pd.Categorical(b, categories=[1, 2, 3])
870
+ expected_index = pd.CategoricalIndex([1, 2, 3], categories=[1, 2, 3], name="B")
871
+ # GH#40669 - summing an empty frame gives float dtype
872
+ expected_values = [20.0, 25.0, 0.0]
873
+ else:
874
+ expected_index = Index([1, 2], name="B")
875
+ expected_values = [20, 25]
876
+ expected = DataFrame(
877
+ {"C": expected_values, "D": expected_values}, index=expected_index
878
+ )
879
+
880
+ df = DataFrame(
881
+ {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))}
882
+ ).set_index(["A", "B"])
883
+ result = df.groupby("B").apply(lambda x: x.sum())
884
+ tm.assert_frame_equal(result, expected)
885
+ assert df.index.names == ["A", "B"]
886
+
887
+
888
+ def test_groupby_apply_datetime_result_dtypes():
889
+ # GH 14849
890
+ data = DataFrame.from_records(
891
+ [
892
+ (pd.Timestamp(2016, 1, 1), "red", "dark", 1, "8"),
893
+ (pd.Timestamp(2015, 1, 1), "green", "stormy", 2, "9"),
894
+ (pd.Timestamp(2014, 1, 1), "blue", "bright", 3, "10"),
895
+ (pd.Timestamp(2013, 1, 1), "blue", "calm", 4, "potato"),
896
+ ],
897
+ columns=["observation", "color", "mood", "intensity", "score"],
898
+ )
899
+ result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes
900
+ expected = Series(
901
+ [np.dtype("datetime64[ns]"), object, object, np.int64, object],
902
+ index=["observation", "color", "mood", "intensity", "score"],
903
+ )
904
+ tm.assert_series_equal(result, expected)
905
+
906
+
907
+ @pytest.mark.parametrize(
908
+ "index",
909
+ [
910
+ pd.CategoricalIndex(list("abc")),
911
+ pd.interval_range(0, 3),
912
+ pd.period_range("2020", periods=3, freq="D"),
913
+ MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
914
+ ],
915
+ )
916
+ def test_apply_index_has_complex_internals(index):
917
+ # GH 31248
918
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
919
+ result = df.groupby("group", group_keys=False).apply(lambda x: x)
920
+ tm.assert_frame_equal(result, df)
921
+
922
+
923
+ @pytest.mark.parametrize(
924
+ "function, expected_values",
925
+ [
926
+ (lambda x: x.index.to_list(), [[0, 1], [2, 3]]),
927
+ (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]),
928
+ (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]),
929
+ (
930
+ lambda x: dict(enumerate(x.index.to_list())),
931
+ [{0: 0, 1: 1}, {0: 2, 1: 3}],
932
+ ),
933
+ (
934
+ lambda x: [{n: i} for (n, i) in enumerate(x.index.to_list())],
935
+ [[{0: 0}, {1: 1}], [{0: 2}, {1: 3}]],
936
+ ),
937
+ ],
938
+ )
939
+ def test_apply_function_returns_non_pandas_non_scalar(function, expected_values):
940
+ # GH 31441
941
+ df = DataFrame(["A", "A", "B", "B"], columns=["groups"])
942
+ result = df.groupby("groups").apply(function)
943
+ expected = Series(expected_values, index=Index(["A", "B"], name="groups"))
944
+ tm.assert_series_equal(result, expected)
945
+
946
+
947
+ def test_apply_function_returns_numpy_array():
948
+ # GH 31605
949
+ def fct(group):
950
+ return group["B"].values.flatten()
951
+
952
+ df = DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]})
953
+
954
+ result = df.groupby("A").apply(fct)
955
+ expected = Series(
956
+ [[1.0, 2.0], [3.0], [np.nan]], index=Index(["a", "b", "none"], name="A")
957
+ )
958
+ tm.assert_series_equal(result, expected)
959
+
960
+
961
+ @pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1])
962
+ def test_apply_function_index_return(function):
963
+ # GH: 22541
964
+ df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"])
965
+ result = df.groupby("id").apply(function)
966
+ expected = Series(
967
+ [Index([0, 4, 7, 9]), Index([1, 2, 3, 5]), Index([6, 8])],
968
+ index=Index([1, 2, 3], name="id"),
969
+ )
970
+ tm.assert_series_equal(result, expected)
971
+
972
+
973
+ def test_apply_function_with_indexing_return_column():
974
+ # GH#7002, GH#41480, GH#49256
975
+ df = DataFrame(
976
+ {
977
+ "foo1": ["one", "two", "two", "three", "one", "two"],
978
+ "foo2": [1, 2, 4, 4, 5, 6],
979
+ }
980
+ )
981
+ result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean())
982
+ expected = DataFrame(
983
+ {
984
+ "foo1": ["one", "three", "two"],
985
+ "foo2": [3.0, 4.0, 4.0],
986
+ }
987
+ )
988
+ tm.assert_frame_equal(result, expected)
989
+
990
+
991
+ @pytest.mark.parametrize(
992
+ "udf",
993
+ [(lambda x: x.copy()), (lambda x: x.copy().rename(lambda y: y + 1))],
994
+ )
995
+ @pytest.mark.parametrize("group_keys", [True, False])
996
+ def test_apply_result_type(group_keys, udf):
997
+ # https://github.com/pandas-dev/pandas/issues/34809
998
+ # We'd like to control whether the group keys end up in the index
999
+ # regardless of whether the UDF happens to be a transform.
1000
+ df = DataFrame({"A": ["a", "b"], "B": [1, 2]})
1001
+ df_result = df.groupby("A", group_keys=group_keys).apply(udf)
1002
+ series_result = df.B.groupby(df.A, group_keys=group_keys).apply(udf)
1003
+
1004
+ if group_keys:
1005
+ assert df_result.index.nlevels == 2
1006
+ assert series_result.index.nlevels == 2
1007
+ else:
1008
+ assert df_result.index.nlevels == 1
1009
+ assert series_result.index.nlevels == 1
1010
+
1011
+
1012
+ def test_result_order_group_keys_false():
1013
+ # GH 34998
1014
+ # apply result order should not depend on whether index is the same or just equal
1015
+ df = DataFrame({"A": [2, 1, 2], "B": [1, 2, 3]})
1016
+ result = df.groupby("A", group_keys=False).apply(lambda x: x)
1017
+ expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy())
1018
+ tm.assert_frame_equal(result, expected)
1019
+
1020
+
1021
+ def test_apply_with_timezones_aware():
1022
+ # GH: 27212
1023
+ dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2
1024
+ index_no_tz = pd.DatetimeIndex(dates)
1025
+ index_tz = pd.DatetimeIndex(dates, tz="UTC")
1026
+ df1 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz})
1027
+ df2 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz})
1028
+
1029
+ result1 = df1.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy())
1030
+ result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy())
1031
+
1032
+ tm.assert_frame_equal(result1, result2)
1033
+
1034
+
1035
+ def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func):
1036
+ # GH #34656
1037
+ # GH #34271
1038
+ df = DataFrame(
1039
+ {
1040
+ "a": [99, 99, 99, 88, 88, 88],
1041
+ "b": [1, 2, 3, 4, 5, 6],
1042
+ "c": [10, 20, 30, 40, 50, 60],
1043
+ }
1044
+ )
1045
+
1046
+ expected = DataFrame(
1047
+ {"a": [264, 297], "b": [15, 6], "c": [150, 60]},
1048
+ index=Index([88, 99], name="a"),
1049
+ )
1050
+
1051
+ # Check output when no other methods are called before .apply()
1052
+ grp = df.groupby(by="a")
1053
+ result = grp.apply(sum)
1054
+ tm.assert_frame_equal(result, expected)
1055
+
1056
+ # Check output when another method is called before .apply()
1057
+ grp = df.groupby(by="a")
1058
+ args = get_groupby_method_args(reduction_func, df)
1059
+ _ = getattr(grp, reduction_func)(*args)
1060
+ result = grp.apply(sum)
1061
+ tm.assert_frame_equal(result, expected)
1062
+
1063
+
1064
+ def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():
1065
+ # GH 29617
1066
+
1067
+ df = DataFrame(
1068
+ {
1069
+ "A": ["a", "a", "a", "b"],
1070
+ "B": [
1071
+ date(2020, 1, 10),
1072
+ date(2020, 1, 10),
1073
+ date(2020, 2, 10),
1074
+ date(2020, 2, 10),
1075
+ ],
1076
+ "C": [1, 2, 3, 4],
1077
+ },
1078
+ index=Index([100, 101, 102, 103], name="idx"),
1079
+ )
1080
+
1081
+ grp = df.groupby(["A", "B"])
1082
+ result = grp.apply(lambda x: x.head(1))
1083
+
1084
+ expected = df.iloc[[0, 2, 3]]
1085
+ expected = expected.reset_index()
1086
+ expected.index = MultiIndex.from_frame(expected[["A", "B", "idx"]])
1087
+ expected = expected.drop(columns="idx")
1088
+
1089
+ tm.assert_frame_equal(result, expected)
1090
+ for val in result.index.levels[1]:
1091
+ assert type(val) is date
1092
+
1093
+
1094
+ def test_apply_by_cols_equals_apply_by_rows_transposed():
1095
+ # GH 16646
1096
+ # Operating on the columns, or transposing and operating on the rows
1097
+ # should give the same result. There was previously a bug where the
1098
+ # by_rows operation would work fine, but by_cols would throw a ValueError
1099
+
1100
+ df = DataFrame(
1101
+ np.random.random([6, 4]),
1102
+ columns=MultiIndex.from_product([["A", "B"], [1, 2]]),
1103
+ )
1104
+
1105
+ by_rows = df.T.groupby(axis=0, level=0).apply(
1106
+ lambda x: x.droplevel(axis=0, level=0)
1107
+ )
1108
+ by_cols = df.groupby(axis=1, level=0).apply(lambda x: x.droplevel(axis=1, level=0))
1109
+
1110
+ tm.assert_frame_equal(by_cols, by_rows.T)
1111
+ tm.assert_frame_equal(by_cols, df)
1112
+
1113
+
1114
+ @pytest.mark.parametrize("dropna", [True, False])
1115
+ def test_apply_dropna_with_indexed_same(dropna):
1116
+ # GH 38227
1117
+ # GH#43205
1118
+ df = DataFrame(
1119
+ {
1120
+ "col": [1, 2, 3, 4, 5],
1121
+ "group": ["a", np.nan, np.nan, "b", "b"],
1122
+ },
1123
+ index=list("xxyxz"),
1124
+ )
1125
+ result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x)
1126
+ expected = df.dropna() if dropna else df.iloc[[0, 3, 1, 2, 4]]
1127
+ tm.assert_frame_equal(result, expected)
1128
+
1129
+
1130
+ @pytest.mark.parametrize(
1131
+ "as_index, expected",
1132
+ [
1133
+ [
1134
+ False,
1135
+ DataFrame(
1136
+ [[1, 1, 1], [2, 2, 1]], columns=Index(["a", "b", None], dtype=object)
1137
+ ),
1138
+ ],
1139
+ [
1140
+ True,
1141
+ Series(
1142
+ [1, 1], index=MultiIndex.from_tuples([(1, 1), (2, 2)], names=["a", "b"])
1143
+ ),
1144
+ ],
1145
+ ],
1146
+ )
1147
+ def test_apply_as_index_constant_lambda(as_index, expected):
1148
+ # GH 13217
1149
+ df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]})
1150
+ result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1)
1151
+ tm.assert_equal(result, expected)
1152
+
1153
+
1154
+ def test_sort_index_groups():
1155
+ # GH 20420
1156
+ df = DataFrame(
1157
+ {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]},
1158
+ index=range(5),
1159
+ )
1160
+ result = df.groupby("C").apply(lambda x: x.A.sort_index())
1161
+ expected = Series(
1162
+ range(1, 6),
1163
+ index=MultiIndex.from_tuples(
1164
+ [(1, 0), (1, 1), (1, 2), (2, 3), (2, 4)], names=["C", None]
1165
+ ),
1166
+ name="A",
1167
+ )
1168
+ tm.assert_series_equal(result, expected)
1169
+
1170
+
1171
+ def test_positional_slice_groups_datetimelike():
1172
+ # GH 21651
1173
+ expected = DataFrame(
1174
+ {
1175
+ "date": pd.date_range("2010-01-01", freq="12H", periods=5),
1176
+ "vals": range(5),
1177
+ "let": list("abcde"),
1178
+ }
1179
+ )
1180
+ result = expected.groupby(
1181
+ [expected.let, expected.date.dt.date], group_keys=False
1182
+ ).apply(lambda x: x.iloc[0:])
1183
+ tm.assert_frame_equal(result, expected)
1184
+
1185
+
1186
+ def test_groupby_apply_shape_cache_safety():
1187
+ # GH#42702 this fails if we cache_readonly Block.shape
1188
+ df = DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3], "C": [4, 6, 5]})
1189
+ gb = df.groupby("A")
1190
+ result = gb[["B", "C"]].apply(lambda x: x.astype(float).max() - x.min())
1191
+
1192
+ expected = DataFrame(
1193
+ {"B": [1.0, 0.0], "C": [2.0, 0.0]}, index=Index(["a", "b"], name="A")
1194
+ )
1195
+ tm.assert_frame_equal(result, expected)
1196
+
1197
+
1198
+ @pytest.mark.parametrize("dropna", [True, False])
1199
+ def test_apply_na(dropna):
1200
+ # GH#28984
1201
+ df = DataFrame(
1202
+ {"grp": [1, 1, 2, 2], "y": [1, 0, 2, 5], "z": [1, 2, np.nan, np.nan]}
1203
+ )
1204
+ dfgrp = df.groupby("grp", dropna=dropna)
1205
+ result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z"))
1206
+ expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1))
1207
+ tm.assert_frame_equal(result, expected)
1208
+
1209
+
1210
+ def test_apply_empty_string_nan_coerce_bug():
1211
+ # GH#24903
1212
+ result = (
1213
+ DataFrame(
1214
+ {
1215
+ "a": [1, 1, 2, 2],
1216
+ "b": ["", "", "", ""],
1217
+ "c": pd.to_datetime([1, 2, 3, 4], unit="s"),
1218
+ }
1219
+ )
1220
+ .groupby(["a", "b"])
1221
+ .apply(lambda df: df.iloc[-1])
1222
+ )
1223
+ expected = DataFrame(
1224
+ [[1, "", pd.to_datetime(2, unit="s")], [2, "", pd.to_datetime(4, unit="s")]],
1225
+ columns=["a", "b", "c"],
1226
+ index=MultiIndex.from_tuples([(1, ""), (2, "")], names=["a", "b"]),
1227
+ )
1228
+ tm.assert_frame_equal(result, expected)
1229
+
1230
+
1231
+ @pytest.mark.parametrize("index_values", [[1, 2, 3], [1.0, 2.0, 3.0]])
1232
+ def test_apply_index_key_error_bug(index_values):
1233
+ # GH 44310
1234
+ result = DataFrame(
1235
+ {
1236
+ "a": ["aa", "a2", "a3"],
1237
+ "b": [1, 2, 3],
1238
+ },
1239
+ index=Index(index_values),
1240
+ )
1241
+ expected = DataFrame(
1242
+ {
1243
+ "b_mean": [2.0, 3.0, 1.0],
1244
+ },
1245
+ index=Index(["a2", "a3", "aa"], name="a"),
1246
+ )
1247
+ result = result.groupby("a").apply(
1248
+ lambda df: Series([df["b"].mean()], index=["b_mean"])
1249
+ )
1250
+ tm.assert_frame_equal(result, expected)
1251
+
1252
+
1253
+ @pytest.mark.parametrize(
1254
+ "arg,idx",
1255
+ [
1256
+ [
1257
+ [
1258
+ 1,
1259
+ 2,
1260
+ 3,
1261
+ ],
1262
+ [
1263
+ 0.1,
1264
+ 0.3,
1265
+ 0.2,
1266
+ ],
1267
+ ],
1268
+ [
1269
+ [
1270
+ 1,
1271
+ 2,
1272
+ 3,
1273
+ ],
1274
+ [
1275
+ 0.1,
1276
+ 0.2,
1277
+ 0.3,
1278
+ ],
1279
+ ],
1280
+ [
1281
+ [
1282
+ 1,
1283
+ 4,
1284
+ 3,
1285
+ ],
1286
+ [
1287
+ 0.1,
1288
+ 0.4,
1289
+ 0.2,
1290
+ ],
1291
+ ],
1292
+ ],
1293
+ )
1294
+ def test_apply_nonmonotonic_float_index(arg, idx):
1295
+ # GH 34455
1296
+ expected = DataFrame({"col": arg}, index=idx)
1297
+ result = expected.groupby("col", group_keys=False).apply(lambda x: x)
1298
+ tm.assert_frame_equal(result, expected)
1299
+
1300
+
1301
+ @pytest.mark.parametrize("args, kwargs", [([True], {}), ([], {"numeric_only": True})])
1302
+ def test_apply_str_with_args(df, args, kwargs):
1303
+ # GH#46479
1304
+ gb = df.groupby("A")
1305
+ result = gb.apply("sum", *args, **kwargs)
1306
+ expected = gb.sum(numeric_only=True)
1307
+ tm.assert_frame_equal(result, expected)
1308
+
1309
+
1310
+ @pytest.mark.parametrize("name", ["some_name", None])
1311
+ def test_result_name_when_one_group(name):
1312
+ # GH 46369
1313
+ ser = Series([1, 2], name=name)
1314
+ result = ser.groupby(["a", "a"], group_keys=False).apply(lambda x: x)
1315
+ expected = Series([1, 2], name=name)
1316
+
1317
+ tm.assert_series_equal(result, expected)
1318
+
1319
+
1320
+ @pytest.mark.parametrize(
1321
+ "method, op",
1322
+ [
1323
+ ("apply", lambda gb: gb.values[-1]),
1324
+ ("apply", lambda gb: gb["b"].iloc[0]),
1325
+ ("agg", "skew"),
1326
+ ("agg", "prod"),
1327
+ ("agg", "sum"),
1328
+ ],
1329
+ )
1330
+ def test_empty_df(method, op):
1331
+ # GH 47985
1332
+ empty_df = DataFrame({"a": [], "b": []})
1333
+ gb = empty_df.groupby("a", group_keys=True)
1334
+ group = getattr(gb, "b")
1335
+
1336
+ result = getattr(group, method)(op)
1337
+ expected = Series(
1338
+ [], name="b", dtype="float64", index=Index([], dtype="float64", name="a")
1339
+ )
1340
+
1341
+ tm.assert_series_equal(result, expected)
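The apply tests above repeatedly exercise the `group_keys` argument; a minimal sketch of the contract they pin down (example data is illustrative):

import pandas as pd

df = pd.DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3]})

# group_keys=True prepends the group label as an extra index level,
# even when the UDF is a transform that keeps the original index.
kept = df.groupby("A", group_keys=True).apply(lambda g: g)
print(kept.index.nlevels)  # 2

# group_keys=False returns the pieces with their original index only.
dropped = df.groupby("A", group_keys=False).apply(lambda g: g)
print(dropped.index.nlevels)  # 1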
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_apply_mutate.py ADDED
@@ -0,0 +1,144 @@
1
+ import numpy as np
2
+
3
+ import pandas as pd
4
+ import pandas._testing as tm
5
+
6
+
7
+ def test_group_by_copy():
8
+ # GH#44803
9
+ df = pd.DataFrame(
10
+ {
11
+ "name": ["Alice", "Bob", "Carl"],
12
+ "age": [20, 21, 20],
13
+ }
14
+ ).set_index("name")
15
+
16
+ grp_by_same_value = df.groupby(["age"], group_keys=False).apply(lambda group: group)
17
+ grp_by_copy = df.groupby(["age"], group_keys=False).apply(
18
+ lambda group: group.copy()
19
+ )
20
+ tm.assert_frame_equal(grp_by_same_value, grp_by_copy)
21
+
22
+
23
+ def test_mutate_groups():
24
+ # GH3380
25
+
26
+ df = pd.DataFrame(
27
+ {
28
+ "cat1": ["a"] * 8 + ["b"] * 6,
29
+ "cat2": ["c"] * 2
30
+ + ["d"] * 2
31
+ + ["e"] * 2
32
+ + ["f"] * 2
33
+ + ["c"] * 2
34
+ + ["d"] * 2
35
+ + ["e"] * 2,
36
+ "cat3": [f"g{x}" for x in range(1, 15)],
37
+ "val": np.random.randint(100, size=14),
38
+ }
39
+ )
40
+
41
+ def f_copy(x):
42
+ x = x.copy()
43
+ x["rank"] = x.val.rank(method="min")
44
+ return x.groupby("cat2")["rank"].min()
45
+
46
+ def f_no_copy(x):
47
+ x["rank"] = x.val.rank(method="min")
48
+ return x.groupby("cat2")["rank"].min()
49
+
50
+ grpby_copy = df.groupby("cat1").apply(f_copy)
51
+ grpby_no_copy = df.groupby("cat1").apply(f_no_copy)
52
+ tm.assert_series_equal(grpby_copy, grpby_no_copy)
53
+
54
+
55
+ def test_no_mutate_but_looks_like():
56
+ # GH 8467
57
+ # the first looks like a mutation, the second does not,
58
+ # but both should yield the same results
59
+ df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
60
+
61
+ result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key)
62
+ result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key)
63
+ tm.assert_series_equal(result1, result2)
64
+
65
+
66
+ def test_apply_function_with_indexing():
67
+ # GH: 33058
68
+ df = pd.DataFrame(
69
+ {"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}
70
+ )
71
+
72
+ def fn(x):
73
+ x.loc[x.index[-1], "col2"] = 0
74
+ return x.col2
75
+
76
+ result = df.groupby(["col1"], as_index=False).apply(fn)
77
+ expected = pd.Series(
78
+ [1, 2, 0, 4, 5, 0],
79
+ index=pd.MultiIndex.from_tuples(
80
+ [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)]
81
+ ),
82
+ name="col2",
83
+ )
84
+ tm.assert_series_equal(result, expected)
85
+
86
+
87
+ def test_apply_mutate_columns_multiindex():
88
+ # GH 12652
89
+ df = pd.DataFrame(
90
+ {
91
+ ("C", "julian"): [1, 2, 3],
92
+ ("B", "geoffrey"): [1, 2, 3],
93
+ ("A", "julian"): [1, 2, 3],
94
+ ("B", "julian"): [1, 2, 3],
95
+ ("A", "geoffrey"): [1, 2, 3],
96
+ ("C", "geoffrey"): [1, 2, 3],
97
+ },
98
+ columns=pd.MultiIndex.from_tuples(
99
+ [
100
+ ("A", "julian"),
101
+ ("A", "geoffrey"),
102
+ ("B", "julian"),
103
+ ("B", "geoffrey"),
104
+ ("C", "julian"),
105
+ ("C", "geoffrey"),
106
+ ]
107
+ ),
108
+ )
109
+
110
+ def add_column(grouped):
111
+ name = grouped.columns[0][1]
112
+ grouped["sum", name] = grouped.sum(axis=1)
113
+ return grouped
114
+
115
+ result = df.groupby(level=1, axis=1).apply(add_column)
116
+ expected = pd.DataFrame(
117
+ [
118
+ [1, 1, 1, 3, 1, 1, 1, 3],
119
+ [2, 2, 2, 6, 2, 2, 2, 6],
120
+ [
121
+ 3,
122
+ 3,
123
+ 3,
124
+ 9,
125
+ 3,
126
+ 3,
127
+ 3,
128
+ 9,
129
+ ],
130
+ ],
131
+ columns=pd.MultiIndex.from_tuples(
132
+ [
133
+ ("geoffrey", "A", "geoffrey"),
134
+ ("geoffrey", "B", "geoffrey"),
135
+ ("geoffrey", "C", "geoffrey"),
136
+ ("geoffrey", "sum", "geoffrey"),
137
+ ("julian", "A", "julian"),
138
+ ("julian", "B", "julian"),
139
+ ("julian", "C", "julian"),
140
+ ("julian", "sum", "julian"),
141
+ ]
142
+ ),
143
+ )
144
+ tm.assert_frame_equal(result, expected)
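As the mutation tests above illustrate, a UDF that writes to the group it receives should copy first; a small sketch of the safe pattern (names are hypothetical):

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})

def ranked(group):
    group = group.copy()  # avoid writing into the original frame's data
    group["rank"] = group["val"].rank(method="min")
    return group

print(df.groupby("key", group_keys=False).apply(ranked))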
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_bin_groupby.py ADDED
@@ -0,0 +1,65 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import lib
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ import pandas._testing as tm
9
+
10
+
11
+ def assert_block_lengths(x):
12
+ assert len(x) == len(x._mgr.blocks[0].mgr_locs)
13
+ return 0
14
+
15
+
16
+ def cumsum_max(x):
17
+ x.cumsum().max()
18
+ return 0
19
+
20
+
21
+ @pytest.mark.parametrize(
22
+ "func",
23
+ [
24
+ cumsum_max,
25
+ pytest.param(assert_block_lengths, marks=td.skip_array_manager_invalid_test),
26
+ ],
27
+ )
28
+ def test_mgr_locs_updated(func):
29
+ # https://github.com/pandas-dev/pandas/issues/31802
30
+ # Some operations may require creating new blocks, which requires
31
+ # valid mgr_locs
32
+ df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]})
33
+ result = df.groupby(["A", "B"]).agg(func)
34
+ expected = pd.DataFrame(
35
+ {"C": [0, 0]},
36
+ index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]),
37
+ )
38
+ tm.assert_frame_equal(result, expected)
39
+
40
+
41
+ @pytest.mark.parametrize(
42
+ "binner,closed,expected",
43
+ [
44
+ (
45
+ np.array([0, 3, 6, 9], dtype=np.int64),
46
+ "left",
47
+ np.array([2, 5, 6], dtype=np.int64),
48
+ ),
49
+ (
50
+ np.array([0, 3, 6, 9], dtype=np.int64),
51
+ "right",
52
+ np.array([3, 6, 6], dtype=np.int64),
53
+ ),
54
+ (np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)),
55
+ (
56
+ np.array([0, 3, 6], dtype=np.int64),
57
+ "right",
58
+ np.array([3, 6], dtype=np.int64),
59
+ ),
60
+ ],
61
+ )
62
+ def test_generate_bins(binner, closed, expected):
63
+ values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
64
+ result = lib.generate_bins_dt64(values, binner, closed=closed)
65
+ tm.assert_numpy_array_equal(result, expected)
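`generate_bins_dt64` is internal, but the closed-side semantics it is tested for mirror the public `pd.cut` API; a rough public-API analogue:

import pandas as pd

values = pd.Series([1, 2, 3, 4, 5, 6])

# With right=True (the default) an edge value belongs to the bin it closes,
# so 3 falls in (0, 3]; with right=False it opens the next bin, [3, 6).
print(pd.cut(values, bins=[0, 3, 6]).value_counts())
print(pd.cut(values, bins=[0, 3, 6], right=False).value_counts())  # 6 -> NaN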
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_categorical.py ADDED
@@ -0,0 +1,2057 @@
1
+ from datetime import datetime
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ Categorical,
9
+ CategoricalIndex,
10
+ DataFrame,
11
+ Index,
12
+ MultiIndex,
13
+ Series,
14
+ qcut,
15
+ )
16
+ import pandas._testing as tm
17
+ from pandas.core.groupby.generic import SeriesGroupBy
18
+ from pandas.tests.groupby import get_groupby_method_args
19
+
20
+
21
+ def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
22
+ """Reindex to a cartesian production for the groupers,
23
+ preserving the nature (Categorical) of each grouper
24
+ """
25
+
26
+ def f(a):
27
+ if isinstance(a, (CategoricalIndex, Categorical)):
28
+ categories = a.categories
29
+ a = Categorical.from_codes(
30
+ np.arange(len(categories)), categories=categories, ordered=a.ordered
31
+ )
32
+ return a
33
+
34
+ index = MultiIndex.from_product(map(f, args), names=names)
35
+ return result.reindex(index, fill_value=fill_value).sort_index()
36
+
37
+
38
+ _results_for_groupbys_with_missing_categories = {
39
+ # This maps the builtin groupby functions to their expected outputs for
40
+ # missing categories when they are called on a categorical grouper with
41
+ # observed=False. Some functions are expected to return NaN, some zero.
42
+ # These expected values can be used across several tests (i.e. they are
43
+ # the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
44
+ # hardcoded in one place.
45
+ "all": np.NaN,
46
+ "any": np.NaN,
47
+ "count": 0,
48
+ "corrwith": np.NaN,
49
+ "first": np.NaN,
50
+ "idxmax": np.NaN,
51
+ "idxmin": np.NaN,
52
+ "last": np.NaN,
53
+ "max": np.NaN,
54
+ "mean": np.NaN,
55
+ "median": np.NaN,
56
+ "min": np.NaN,
57
+ "nth": np.NaN,
58
+ "nunique": 0,
59
+ "prod": np.NaN,
60
+ "quantile": np.NaN,
61
+ "sem": np.NaN,
62
+ "size": 0,
63
+ "skew": np.NaN,
64
+ "std": np.NaN,
65
+ "sum": 0,
66
+ "var": np.NaN,
67
+ }
68
+
69
+
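A compact illustration of the table above: with observed=False, reductions over an unobserved category return either the reduction's identity or NaN (data is illustrative):

import pandas as pd

cat = pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])
df = pd.DataFrame({"key": cat, "val": [1.0, 2.0, 3.0]})
gb = df.groupby("key", observed=False)

print(gb["val"].sum())   # unobserved "c" -> 0 (empty sum)
print(gb["val"].mean())  # unobserved "c" -> NaN
print(gb.size())         # unobserved "c" -> 0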
70
+ def test_apply_use_categorical_name(df):
71
+ cats = qcut(df.C, 4)
72
+
73
+ def get_stats(group):
74
+ return {
75
+ "min": group.min(),
76
+ "max": group.max(),
77
+ "count": group.count(),
78
+ "mean": group.mean(),
79
+ }
80
+
81
+ result = df.groupby(cats, observed=False).D.apply(get_stats)
82
+ assert result.index.names[0] == "C"
83
+
84
+
85
+ def test_basic(): # TODO: split this test
86
+ cats = Categorical(
87
+ ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
88
+ categories=["a", "b", "c", "d"],
89
+ ordered=True,
90
+ )
91
+ data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
92
+
93
+ exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
94
+ expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
95
+ result = data.groupby("b", observed=False).mean()
96
+ tm.assert_frame_equal(result, expected)
97
+
98
+ cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
99
+ cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
100
+ df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
101
+
102
+ # single grouper
103
+ gb = df.groupby("A", observed=False)
104
+ exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
105
+ expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
106
+ result = gb.sum(numeric_only=True)
107
+ tm.assert_frame_equal(result, expected)
108
+
109
+ # GH 8623
110
+ x = DataFrame(
111
+ [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. Doe"]],
112
+ columns=["person_id", "person_name"],
113
+ )
114
+ x["person_name"] = Categorical(x.person_name)
115
+
116
+ g = x.groupby(["person_id"], observed=False)
117
+ result = g.transform(lambda x: x)
118
+ tm.assert_frame_equal(result, x[["person_name"]])
119
+
120
+ result = x.drop_duplicates("person_name")
121
+ expected = x.iloc[[0, 1]]
122
+ tm.assert_frame_equal(result, expected)
123
+
124
+ def f(x):
125
+ return x.drop_duplicates("person_name").iloc[0]
126
+
127
+ result = g.apply(f)
128
+ expected = x.iloc[[0, 1]].copy()
129
+ expected.index = Index([1, 2], name="person_id")
130
+ expected["person_name"] = expected["person_name"].astype("object")
131
+ tm.assert_frame_equal(result, expected)
132
+
133
+ # GH 9921
134
+ # Monotonic
135
+ df = DataFrame({"a": [5, 15, 25]})
136
+ c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
137
+
138
+ result = df.a.groupby(c, observed=False).transform(sum)
139
+ tm.assert_series_equal(result, df["a"])
140
+
141
+ tm.assert_series_equal(
142
+ df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
143
+ )
144
+ tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
145
+
146
+ gbc = df.groupby(c, observed=False)
147
+ result = gbc.transform(lambda xs: np.max(xs, axis=0))
148
+ tm.assert_frame_equal(result, df[["a"]])
149
+
150
+ with tm.assert_produces_warning(None):
151
+ result2 = gbc.transform(lambda xs: np.max(xs, axis=0))
152
+ result3 = gbc.transform(max)
153
+ result4 = gbc.transform(np.maximum.reduce)
154
+ result5 = gbc.transform(lambda xs: np.maximum.reduce(xs))
155
+ tm.assert_frame_equal(result2, df[["a"]], check_dtype=False)
156
+ tm.assert_frame_equal(result3, df[["a"]], check_dtype=False)
157
+ tm.assert_frame_equal(result4, df[["a"]])
158
+ tm.assert_frame_equal(result5, df[["a"]])
159
+
160
+ # Filter
161
+ tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
162
+ tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
163
+
164
+ # Non-monotonic
165
+ df = DataFrame({"a": [5, 15, 25, -5]})
166
+ c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
167
+
168
+ result = df.a.groupby(c, observed=False).transform(sum)
169
+ tm.assert_series_equal(result, df["a"])
170
+
171
+ tm.assert_series_equal(
172
+ df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
173
+ )
174
+ tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
175
+ tm.assert_frame_equal(
176
+ df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
177
+ )
178
+
179
+ # GH 9603
180
+ df = DataFrame({"a": [1, 0, 0, 0]})
181
+ c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
182
+ result = df.groupby(c, observed=False).apply(len)
183
+
184
+ exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
185
+ expected = Series([1, 0, 0, 0], index=exp_index)
186
+ expected.index.name = "a"
187
+ tm.assert_series_equal(result, expected)
188
+
189
+ # more basic
190
+ levels = ["foo", "bar", "baz", "qux"]
191
+ codes = np.random.randint(0, 4, size=100)
192
+
193
+ cats = Categorical.from_codes(codes, levels, ordered=True)
194
+
195
+ data = DataFrame(np.random.randn(100, 4))
196
+
197
+ result = data.groupby(cats, observed=False).mean()
198
+
199
+ expected = data.groupby(np.asarray(cats), observed=False).mean()
200
+ exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
201
+ expected = expected.reindex(exp_idx)
202
+
203
+ tm.assert_frame_equal(result, expected)
204
+
205
+ grouped = data.groupby(cats, observed=False)
206
+ desc_result = grouped.describe()
207
+
208
+ idx = cats.codes.argsort()
209
+ ord_labels = np.asarray(cats).take(idx)
210
+ ord_data = data.take(idx)
211
+
212
+ exp_cats = Categorical(
213
+ ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
214
+ )
215
+ expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
216
+ tm.assert_frame_equal(desc_result, expected)
217
+
218
+ # GH 10460
219
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
220
+ exp = CategoricalIndex(expc)
221
+ tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
222
+ exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
223
+ tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
224
+
225
+
226
+ def test_level_get_group(observed):
227
+ # GH15155
228
+ df = DataFrame(
229
+ data=np.arange(2, 22, 2),
230
+ index=MultiIndex(
231
+ levels=[CategoricalIndex(["a", "b"]), range(10)],
232
+ codes=[[0] * 5 + [1] * 5, range(10)],
233
+ names=["Index1", "Index2"],
234
+ ),
235
+ )
236
+ g = df.groupby(level=["Index1"], observed=observed)
237
+
238
+ # expected should equal test.loc[["a"]]
239
+ # GH15166
240
+ expected = DataFrame(
241
+ data=np.arange(2, 12, 2),
242
+ index=MultiIndex(
243
+ levels=[CategoricalIndex(["a", "b"]), range(5)],
244
+ codes=[[0] * 5, range(5)],
245
+ names=["Index1", "Index2"],
246
+ ),
247
+ )
248
+ result = g.get_group("a")
249
+
250
+ tm.assert_frame_equal(result, expected)
251
+
252
+
253
+ def test_sorting_with_different_categoricals():
254
+ # GH 24271
255
+ df = DataFrame(
256
+ {
257
+ "group": ["A"] * 6 + ["B"] * 6,
258
+ "dose": ["high", "med", "low"] * 4,
259
+ "outcomes": np.arange(12.0),
260
+ }
261
+ )
262
+
263
+ df.dose = Categorical(df.dose, categories=["low", "med", "high"], ordered=True)
264
+
265
+ result = df.groupby("group")["dose"].value_counts()
266
+ result = result.sort_index(level=0, sort_remaining=True)
267
+ index = ["low", "med", "high", "low", "med", "high"]
268
+ index = Categorical(index, categories=["low", "med", "high"], ordered=True)
269
+ index = [["A", "A", "A", "B", "B", "B"], CategoricalIndex(index)]
270
+ index = MultiIndex.from_arrays(index, names=["group", "dose"])
271
+ expected = Series([2] * 6, index=index, name="count")
272
+ tm.assert_series_equal(result, expected)
273
+
274
+
275
+ @pytest.mark.parametrize("ordered", [True, False])
276
+ def test_apply(ordered):
277
+ # GH 10138
278
+
279
+ dense = Categorical(list("abc"), ordered=ordered)
280
+
281
+ # 'b' is in the categories but not in the list
282
+ missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
283
+ values = np.arange(len(dense))
284
+ df = DataFrame({"missing": missing, "dense": dense, "values": values})
285
+ grouped = df.groupby(["missing", "dense"], observed=True)
286
+
287
+ # missing category 'b' should still exist in the output index
288
+ idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
289
+ expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
290
+
291
+ result = grouped.apply(lambda x: np.mean(x, axis=0))
292
+ tm.assert_frame_equal(result, expected)
293
+
294
+ result = grouped.mean()
295
+ tm.assert_frame_equal(result, expected)
296
+
297
+ result = grouped.agg(np.mean)
298
+ tm.assert_frame_equal(result, expected)
299
+
300
+ # but for transform we should still get back the original index
301
+ idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
302
+ expected = Series(1, index=idx)
303
+ result = grouped.apply(lambda x: 1)
304
+ tm.assert_series_equal(result, expected)
305
+
306
+
307
+ def test_observed(observed):
308
+ # multiple groupers, don't re-expand the output space
309
+ # of the grouper
310
+ # gh-14942 (implement)
311
+ # gh-10132 (back-compat)
312
+ # gh-8138 (back-compat)
313
+ # gh-8869
314
+
315
+ cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
316
+ cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
317
+ df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
318
+ df["C"] = ["foo", "bar"] * 2
319
+
320
+ # multiple groupers with a non-cat
321
+ gb = df.groupby(["A", "B", "C"], observed=observed)
322
+ exp_index = MultiIndex.from_arrays(
323
+ [cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
324
+ )
325
+ expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
326
+ result = gb.sum()
327
+ if not observed:
328
+ expected = cartesian_product_for_groupers(
329
+ expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
330
+ )
331
+
332
+ tm.assert_frame_equal(result, expected)
333
+
334
+ gb = df.groupby(["A", "B"], observed=observed)
335
+ exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
336
+ expected = DataFrame(
337
+ {"values": [1, 2, 3, 4], "C": ["foo", "bar", "foo", "bar"]}, index=exp_index
338
+ )
339
+ result = gb.sum()
340
+ if not observed:
341
+ expected = cartesian_product_for_groupers(
342
+ expected, [cat1, cat2], list("AB"), fill_value=0
343
+ )
344
+
345
+ tm.assert_frame_equal(result, expected)
346
+
347
+ # https://github.com/pandas-dev/pandas/issues/8138
348
+ d = {
349
+ "cat": Categorical(
350
+ ["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
351
+ ),
352
+ "ints": [1, 1, 2, 2],
353
+ "val": [10, 20, 30, 40],
354
+ }
355
+ df = DataFrame(d)
356
+
357
+ # Grouping on a single column
358
+ groups_single_key = df.groupby("cat", observed=observed)
359
+ result = groups_single_key.mean()
360
+
361
+ exp_index = CategoricalIndex(
362
+ list("ab"), name="cat", categories=list("abc"), ordered=True
363
+ )
364
+ expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
365
+ if not observed:
366
+ index = CategoricalIndex(
367
+ list("abc"), name="cat", categories=list("abc"), ordered=True
368
+ )
369
+ expected = expected.reindex(index)
370
+
371
+ tm.assert_frame_equal(result, expected)
372
+
373
+ # Grouping on two columns
374
+ groups_double_key = df.groupby(["cat", "ints"], observed=observed)
375
+ result = groups_double_key.agg("mean")
376
+ expected = DataFrame(
377
+ {
378
+ "val": [10.0, 30.0, 20.0, 40.0],
379
+ "cat": Categorical(
380
+ ["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
381
+ ),
382
+ "ints": [1, 2, 1, 2],
383
+ }
384
+ ).set_index(["cat", "ints"])
385
+ if not observed:
386
+ expected = cartesian_product_for_groupers(
387
+ expected, [df.cat.values, [1, 2]], ["cat", "ints"]
388
+ )
389
+
390
+ tm.assert_frame_equal(result, expected)
391
+
392
+ # GH 10132
393
+ for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
394
+ c, i = key
395
+ result = groups_double_key.get_group(key)
396
+ expected = df[(df.cat == c) & (df.ints == i)]
397
+ tm.assert_frame_equal(result, expected)
398
+
399
+ # gh-8869
400
+ # with as_index
401
+ d = {
402
+ "foo": [10, 8, 4, 8, 4, 1, 1],
403
+ "bar": [10, 20, 30, 40, 50, 60, 70],
404
+ "baz": ["d", "c", "e", "a", "a", "d", "c"],
405
+ }
406
+ df = DataFrame(d)
407
+ cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
408
+ df["range"] = cat
409
+ groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
410
+ result = groups.agg("mean")
411
+
412
+ groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
413
+ expected = groups2.agg("mean").reset_index()
414
+ tm.assert_frame_equal(result, expected)
415
+
416
+
417
+ def test_observed_codes_remap(observed):
418
+ d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
419
+ df = DataFrame(d)
420
+ values = pd.cut(df["C1"], [1, 2, 3, 6])
421
+ values.name = "cat"
422
+ groups_double_key = df.groupby([values, "C2"], observed=observed)
423
+
424
+ idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
425
+ expected = DataFrame(
426
+ {"C1": [3.0, 3.0, 4.0, 5.0], "C3": [10.0, 100.0, 200.0, 34.0]}, index=idx
427
+ )
428
+ if not observed:
429
+ expected = cartesian_product_for_groupers(
430
+ expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
431
+ )
432
+
433
+ result = groups_double_key.agg("mean")
434
+ tm.assert_frame_equal(result, expected)
435
+
436
+
437
+ def test_observed_perf():
438
+ # we create a cartesian product, so this is
439
+ # non-performant if we don't use observed values
440
+ # gh-14942
441
+ df = DataFrame(
442
+ {
443
+ "cat": np.random.randint(0, 255, size=30000),
444
+ "int_id": np.random.randint(0, 255, size=30000),
445
+ "other_id": np.random.randint(0, 10000, size=30000),
446
+ "foo": 0,
447
+ }
448
+ )
449
+ df["cat"] = df.cat.astype(str).astype("category")
450
+
451
+ grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
452
+ result = grouped.count()
453
+ assert result.index.levels[0].nunique() == df.cat.nunique()
454
+ assert result.index.levels[1].nunique() == df.int_id.nunique()
455
+ assert result.index.levels[2].nunique() == df.other_id.nunique()
456
+
457
+
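The performance concern behind the test above in one picture: observed=False expands the result to the full cartesian product of category levels, while observed=True keeps only combinations present in the data (a small sketch):

import pandas as pd

df = pd.DataFrame(
    {
        "c1": pd.Categorical(["x", "y"], categories=list("xyz")),
        "c2": pd.Categorical(["u", "v"], categories=list("uvw")),
    }
)

print(len(df.groupby(["c1", "c2"], observed=True).size()))   # 2 observed pairs
print(len(df.groupby(["c1", "c2"], observed=False).size()))  # 3 * 3 = 9 pairs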
458
+ def test_observed_groups(observed):
459
+ # gh-20583
460
+ # test that we have the appropriate groups
461
+
462
+ cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
463
+ df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
464
+ g = df.groupby("cat", observed=observed)
465
+
466
+ result = g.groups
467
+ if observed:
468
+ expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
469
+ else:
470
+ expected = {
471
+ "a": Index([0, 2], dtype="int64"),
472
+ "b": Index([], dtype="int64"),
473
+ "c": Index([1], dtype="int64"),
474
+ }
475
+
476
+ tm.assert_dict_equal(result, expected)
477
+
478
+
479
+ @pytest.mark.parametrize(
480
+ "keys, expected_values, expected_index_levels",
481
+ [
482
+ ("a", [15, 9, 0], CategoricalIndex([1, 2, 3], name="a")),
483
+ (
484
+ ["a", "b"],
485
+ [7, 8, 0, 0, 0, 9, 0, 0, 0],
486
+ [CategoricalIndex([1, 2, 3], name="a"), Index([4, 5, 6])],
487
+ ),
488
+ (
489
+ ["a", "a2"],
490
+ [15, 0, 0, 0, 9, 0, 0, 0, 0],
491
+ [
492
+ CategoricalIndex([1, 2, 3], name="a"),
493
+ CategoricalIndex([1, 2, 3], name="a"),
494
+ ],
495
+ ),
496
+ ],
497
+ )
498
+ @pytest.mark.parametrize("test_series", [True, False])
499
+ def test_unobserved_in_index(keys, expected_values, expected_index_levels, test_series):
500
+ # GH#49354 - ensure unobserved cats occur when grouping by index levels
501
+ df = DataFrame(
502
+ {
503
+ "a": Categorical([1, 1, 2], categories=[1, 2, 3]),
504
+ "a2": Categorical([1, 1, 2], categories=[1, 2, 3]),
505
+ "b": [4, 5, 6],
506
+ "c": [7, 8, 9],
507
+ }
508
+ ).set_index(["a", "a2"])
509
+ if "b" not in keys:
510
+ # Only keep b when it is used for grouping for consistent columns in the result
511
+ df = df.drop(columns="b")
512
+
513
+ gb = df.groupby(keys, observed=False)
514
+ if test_series:
515
+ gb = gb["c"]
516
+ result = gb.sum()
517
+
518
+ if len(keys) == 1:
519
+ index = expected_index_levels
520
+ else:
521
+ codes = [[0, 0, 0, 1, 1, 1, 2, 2, 2], 3 * [0, 1, 2]]
522
+ index = MultiIndex(
523
+ expected_index_levels,
524
+ codes=codes,
525
+ names=keys,
526
+ )
527
+ expected = DataFrame({"c": expected_values}, index=index)
528
+ if test_series:
529
+ expected = expected["c"]
530
+ tm.assert_equal(result, expected)
531
+
532
+
533
+ def test_observed_groups_with_nan(observed):
534
+ # GH 24740
535
+ df = DataFrame(
536
+ {
537
+ "cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
538
+ "vals": [1, 2, 3],
539
+ }
540
+ )
541
+ g = df.groupby("cat", observed=observed)
542
+ result = g.groups
543
+ if observed:
544
+ expected = {"a": Index([0, 2], dtype="int64")}
545
+ else:
546
+ expected = {
547
+ "a": Index([0, 2], dtype="int64"),
548
+ "b": Index([], dtype="int64"),
549
+ "d": Index([], dtype="int64"),
550
+ }
551
+ tm.assert_dict_equal(result, expected)
552
+
553
+
554
+ def test_observed_nth():
555
+ # GH 26385
556
+ cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
557
+ ser = Series([1, 2, 3])
558
+ df = DataFrame({"cat": cat, "ser": ser})
559
+
560
+ result = df.groupby("cat", observed=False)["ser"].nth(0)
561
+ expected = df["ser"].iloc[[0]]
562
+ tm.assert_series_equal(result, expected)
563
+
564
+
565
+ def test_dataframe_categorical_with_nan(observed):
566
+ # GH 21151
567
+ s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
568
+ s2 = Series([1, 2, 3, 4])
569
+ df = DataFrame({"s1": s1, "s2": s2})
570
+ result = df.groupby("s1", observed=observed).first().reset_index()
571
+ if observed:
572
+ expected = DataFrame(
573
+ {"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
574
+ )
575
+ else:
576
+ expected = DataFrame(
577
+ {
578
+ "s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
579
+ "s2": [2, np.nan, np.nan],
580
+ }
581
+ )
582
+ tm.assert_frame_equal(result, expected)
583
+
584
+
585
+ @pytest.mark.parametrize("ordered", [True, False])
586
+ @pytest.mark.parametrize("observed", [True, False])
587
+ @pytest.mark.parametrize("sort", [True, False])
588
+ def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
589
+ # GH 25871: Fix groupby sorting on ordered Categoricals
590
+ # GH 25167: Groupby with observed=True doesn't sort
591
+
592
+ # Build a dataframe with cat having one unobserved category ('missing'),
593
+ # and a Series with identical values
594
+ label = Categorical(
595
+ ["d", "a", "b", "a", "d", "b"],
596
+ categories=["a", "b", "missing", "d"],
597
+ ordered=ordered,
598
+ )
599
+ val = Series(["d", "a", "b", "a", "d", "b"])
600
+ df = DataFrame({"label": label, "val": val})
601
+
602
+ # aggregate on the Categorical
603
+ result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
604
+
605
+ # If ordering works, we expect index labels equal to aggregation results,
606
+ # except for 'observed=False': label 'missing' has aggregation None
607
+ label = Series(result.index.array, dtype="object")
608
+ aggr = Series(result.array)
609
+ if not observed:
610
+ aggr[aggr.isna()] = "missing"
611
+ if not all(label == aggr):
612
+ msg = (
613
+ "Labels and aggregation results not consistently sorted\n"
614
+ f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
615
+ f"Result:\n{result}"
616
+ )
617
+ assert False, msg
618
+
619
+
620
+ def test_datetime():
621
+ # GH9049: ensure backward compatibility
622
+ levels = pd.date_range("2014-01-01", periods=4)
623
+ codes = np.random.randint(0, 4, size=100)
624
+
625
+ cats = Categorical.from_codes(codes, levels, ordered=True)
626
+
627
+ data = DataFrame(np.random.randn(100, 4))
628
+ result = data.groupby(cats, observed=False).mean()
629
+
630
+ expected = data.groupby(np.asarray(cats), observed=False).mean()
631
+ expected = expected.reindex(levels)
632
+ expected.index = CategoricalIndex(
633
+ expected.index, categories=expected.index, ordered=True
634
+ )
635
+
636
+ tm.assert_frame_equal(result, expected)
637
+
638
+ grouped = data.groupby(cats, observed=False)
639
+ desc_result = grouped.describe()
640
+
641
+ idx = cats.codes.argsort()
642
+ ord_labels = cats.take(idx)
643
+ ord_data = data.take(idx)
644
+ expected = ord_data.groupby(ord_labels, observed=False).describe()
645
+ tm.assert_frame_equal(desc_result, expected)
646
+ tm.assert_index_equal(desc_result.index, expected.index)
647
+ tm.assert_index_equal(
648
+ desc_result.index.get_level_values(0), expected.index.get_level_values(0)
649
+ )
650
+
651
+ # GH 10460
652
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
653
+ exp = CategoricalIndex(expc)
654
+ tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
655
+ exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
656
+ tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
657
+
658
+
659
+ def test_categorical_index():
660
+ s = np.random.RandomState(12345)
661
+ levels = ["foo", "bar", "baz", "qux"]
662
+ codes = s.randint(0, 4, size=20)
663
+ cats = Categorical.from_codes(codes, levels, ordered=True)
664
+ df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
665
+ df["cats"] = cats
666
+
667
+ # with a cat index
668
+ result = df.set_index("cats").groupby(level=0, observed=False).sum()
669
+ expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
670
+ expected.index = CategoricalIndex(
671
+ Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
672
+ )
673
+ tm.assert_frame_equal(result, expected)
674
+
675
+ # with a cat column, should produce a cat index
676
+ result = df.groupby("cats", observed=False).sum()
677
+ expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
678
+ expected.index = CategoricalIndex(
679
+ Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
680
+ )
681
+ tm.assert_frame_equal(result, expected)
682
+
683
+
684
+ def test_describe_categorical_columns():
685
+ # GH 11558
686
+ cats = CategoricalIndex(
687
+ ["qux", "foo", "baz", "bar"],
688
+ categories=["foo", "bar", "baz", "qux"],
689
+ ordered=True,
690
+ )
691
+ df = DataFrame(np.random.randn(20, 4), columns=cats)
692
+ result = df.groupby([1, 2, 3, 4] * 5).describe()
693
+
694
+ tm.assert_index_equal(result.stack().columns, cats)
695
+ tm.assert_categorical_equal(result.stack().columns.values, cats.values)
696
+
697
+
698
+ def test_unstack_categorical():
699
+ # GH11558 (example is taken from the original issue)
700
+ df = DataFrame(
701
+ {"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
702
+ )
703
+ df["medium"] = df["medium"].astype("category")
704
+
705
+ gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
706
+ result = gcat.describe()
707
+
708
+ exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
709
+ tm.assert_index_equal(result.columns, exp_columns)
710
+ tm.assert_categorical_equal(result.columns.values, exp_columns.values)
711
+
712
+ result = gcat["A"] + gcat["B"]
713
+ expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
714
+ tm.assert_series_equal(result, expected)
715
+
716
+
717
+ def test_bins_unequal_len():
718
+ # GH3011
719
+ series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
720
+ bins = pd.cut(series.dropna().values, 4)
721
+
722
+ # len(bins) != len(series) here
723
+ with pytest.raises(ValueError, match="Grouper and axis must be same length"):
724
+ series.groupby(bins).mean()
725
+
726
+
727
+ @pytest.mark.parametrize(
728
+ ["series", "data"],
729
+ [
730
+ # Group a series with length and index equal to those of the grouper.
731
+ (Series(range(4)), {"A": [0, 3], "B": [1, 2]}),
732
+ # Group a series with length equal to that of the grouper and index unequal to
733
+ # that of the grouper.
734
+ (Series(range(4)).rename(lambda idx: idx + 1), {"A": [2], "B": [0, 1]}),
735
+ # GH44179: Group a series with length unequal to that of the grouper.
736
+ (Series(range(7)), {"A": [0, 3], "B": [1, 2]}),
737
+ ],
738
+ )
739
+ def test_categorical_series(series, data):
740
+ # Group the given series by a series with categorical data type such that group A
741
+ # takes indices 0 and 3 and group B indices 1 and 2, obtaining the values mapped in
742
+ # the given data.
743
+ groupby = series.groupby(Series(list("ABBA"), dtype="category"))
744
+ result = groupby.aggregate(list)
745
+ expected = Series(data, index=CategoricalIndex(data.keys()))
746
+ tm.assert_series_equal(result, expected)
747
+
748
+
749
+ def test_as_index():
750
+ # GH13204
751
+ df = DataFrame(
752
+ {
753
+ "cat": Categorical([1, 2, 2], [1, 2, 3]),
754
+ "A": [10, 11, 11],
755
+ "B": [101, 102, 103],
756
+ }
757
+ )
758
+ result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
759
+ expected = DataFrame(
760
+ {
761
+ "cat": Categorical([1, 2], categories=df.cat.cat.categories),
762
+ "A": [10, 11],
763
+ "B": [101, 205],
764
+ },
765
+ columns=["cat", "A", "B"],
766
+ )
767
+ tm.assert_frame_equal(result, expected)
768
+
769
+ # function grouper
770
+ f = lambda r: df.loc[r, "A"]
771
+ result = df.groupby(["cat", f], as_index=False, observed=True).sum()
772
+ expected = DataFrame(
773
+ {
774
+ "cat": Categorical([1, 2], categories=df.cat.cat.categories),
775
+ "A": [10, 22],
776
+ "B": [101, 205],
777
+ },
778
+ columns=["cat", "A", "B"],
779
+ )
780
+ tm.assert_frame_equal(result, expected)
781
+
782
+ # another not in-axis grouper (conflicting names in index)
783
+ s = Series(["a", "b", "b"], name="cat")
784
+ result = df.groupby(["cat", s], as_index=False, observed=True).sum()
785
+ tm.assert_frame_equal(result, expected)
786
+
787
+ # is original index dropped?
788
+ group_columns = ["cat", "A"]
789
+ expected = DataFrame(
790
+ {
791
+ "cat": Categorical([1, 2], categories=df.cat.cat.categories),
792
+ "A": [10, 11],
793
+ "B": [101, 205],
794
+ },
795
+ columns=["cat", "A", "B"],
796
+ )
797
+
798
+ for name in [None, "X", "B"]:
799
+ df.index = Index(list("abc"), name=name)
800
+ result = df.groupby(group_columns, as_index=False, observed=True).sum()
801
+
802
+ tm.assert_frame_equal(result, expected)
803
+
804
+
805
+ def test_preserve_categories():
806
+ # GH-13179
807
+ categories = list("abc")
808
+
809
+ # ordered=True
810
+ df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
811
+ sort_index = CategoricalIndex(categories, categories, ordered=True, name="A")
812
+ nosort_index = CategoricalIndex(list("bac"), categories, ordered=True, name="A")
813
+ tm.assert_index_equal(
814
+ df.groupby("A", sort=True, observed=False).first().index, sort_index
815
+ )
816
+ # GH#42482 - don't sort result when sort=False, even when ordered=True
817
+ tm.assert_index_equal(
818
+ df.groupby("A", sort=False, observed=False).first().index, nosort_index
819
+ )
820
+
821
+ # ordered=False
822
+ df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
823
+ sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
824
+ # GH#48749 - don't change order of categories
825
+ # GH#42482 - don't sort result when sort=False, even when ordered=True
826
+ nosort_index = CategoricalIndex(list("bac"), list("abc"), ordered=False, name="A")
827
+ tm.assert_index_equal(
828
+ df.groupby("A", sort=True, observed=False).first().index, sort_index
829
+ )
830
+ tm.assert_index_equal(
831
+ df.groupby("A", sort=False, observed=False).first().index, nosort_index
832
+ )
833
+
834
+
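A short sketch of the ordering contract test_preserve_categories asserts: sort=True orders groups by category order, sort=False by first appearance, even for ordered categoricals (GH#42482):

import pandas as pd

cat = pd.Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
df = pd.DataFrame({"g": cat, "v": [1, 2, 3]})

print(df.groupby("g", sort=True, observed=False).first().index.tolist())   # ['a', 'b']
print(df.groupby("g", sort=False, observed=False).first().index.tolist())  # ['b', 'a']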
835
+ def test_preserve_categorical_dtype():
836
+ # GH13743, GH13854
837
+ df = DataFrame(
838
+ {
839
+ "A": [1, 2, 1, 1, 2],
840
+ "B": [10, 16, 22, 28, 34],
841
+ "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
842
+ "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
843
+ }
844
+ )
845
+ # single grouper
846
+ exp_full = DataFrame(
847
+ {
848
+ "A": [2.0, 1.0, np.nan],
849
+ "B": [25.0, 20.0, np.nan],
850
+ "C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
851
+ "C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
852
+ }
853
+ )
854
+ for col in ["C1", "C2"]:
855
+ result1 = df.groupby(by=col, as_index=False, observed=False).mean(
856
+ numeric_only=True
857
+ )
858
+ result2 = (
859
+ df.groupby(by=col, as_index=True, observed=False)
860
+ .mean(numeric_only=True)
861
+ .reset_index()
862
+ )
863
+ expected = exp_full.reindex(columns=result1.columns)
864
+ tm.assert_frame_equal(result1, expected)
865
+ tm.assert_frame_equal(result2, expected)
866
+
867
+
868
+ @pytest.mark.parametrize(
869
+ "func, values",
870
+ [
871
+ ("first", ["second", "first"]),
872
+ ("last", ["fourth", "third"]),
873
+ ("min", ["fourth", "first"]),
874
+ ("max", ["second", "third"]),
875
+ ],
876
+ )
877
+ def test_preserve_on_ordered_ops(func, values):
878
+ # gh-18502
879
+ # preserve the categoricals on ops
880
+ c = Categorical(["first", "second", "third", "fourth"], ordered=True)
881
+ df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
882
+ g = df.groupby("payload")
883
+ result = getattr(g, func)()
884
+ expected = DataFrame(
885
+ {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
886
+ ).set_index("payload")
887
+ tm.assert_frame_equal(result, expected)
888
+
889
+ # we should also preserve categorical for SeriesGroupBy
890
+ sgb = df.groupby("payload")["col"]
891
+ result = getattr(sgb, func)()
892
+ expected = expected["col"]
893
+ tm.assert_series_equal(result, expected)
894
+
895
+
896
+ def test_categorical_no_compress():
897
+ data = Series(np.random.randn(9))
898
+
899
+ codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
900
+ cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
901
+
902
+ result = data.groupby(cats, observed=False).mean()
903
+ exp = data.groupby(codes, observed=False).mean()
904
+
905
+ exp.index = CategoricalIndex(
906
+ exp.index, categories=cats.categories, ordered=cats.ordered
907
+ )
908
+ tm.assert_series_equal(result, exp)
909
+
910
+ codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
911
+ cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
912
+
913
+ result = data.groupby(cats, observed=False).mean()
914
+ exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
915
+ exp.index = CategoricalIndex(
916
+ exp.index, categories=cats.categories, ordered=cats.ordered
917
+ )
918
+ tm.assert_series_equal(result, exp)
919
+
920
+ cats = Categorical(
921
+ ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
922
+ categories=["a", "b", "c", "d"],
923
+ ordered=True,
924
+ )
925
+ data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
926
+
927
+ result = data.groupby("b", observed=False).mean()
928
+ result = result["a"].values
929
+ exp = np.array([1, 2, 4, np.nan])
930
+ tm.assert_numpy_array_equal(result, exp)
931
+
932
+
933
+ def test_groupby_empty_with_category():
934
+ # GH-9614
935
+ # test fix for when group by on None resulted in
936
+ # coercion of dtype categorical -> float
937
+ df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
938
+ result = df.groupby("A").first()["B"]
939
+ expected = Series(
940
+ Categorical([], categories=["test", "train"]),
941
+ index=Series([], dtype="object", name="A"),
942
+ name="B",
943
+ )
944
+ tm.assert_series_equal(result, expected)
945
+
946
+
947
+ def test_sort():
948
+ # https://stackoverflow.com/questions/23814368/sorting-pandas-
949
+ # categorical-labels-after-groupby
950
+ # This should result in a properly sorted Series so that the plot
951
+ # has a sorted x axis
952
+ # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
953
+
954
+ df = DataFrame({"value": np.random.randint(0, 10000, 100)})
955
+ labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
956
+ cat_labels = Categorical(labels, labels)
957
+
958
+ df = df.sort_values(by=["value"], ascending=True)
959
+ df["value_group"] = pd.cut(
960
+ df.value, range(0, 10500, 500), right=False, labels=cat_labels
961
+ )
962
+
963
+ res = df.groupby(["value_group"], observed=False)["value_group"].count()
964
+ exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
965
+ exp.index = CategoricalIndex(exp.index, name=exp.index.name)
966
+ tm.assert_series_equal(res, exp)
967
+
968
+
969
+ @pytest.mark.parametrize("ordered", [True, False])
970
+ def test_sort2(sort, ordered):
971
+ # dataframe groupby sort was being ignored # GH 8868
972
+ # GH#48749 - don't change order of categories
973
+ # GH#42482 - don't sort result when sort=False, even when ordered=True
974
+ df = DataFrame(
975
+ [
976
+ ["(7.5, 10]", 10, 10],
977
+ ["(7.5, 10]", 8, 20],
978
+ ["(2.5, 5]", 5, 30],
979
+ ["(5, 7.5]", 6, 40],
980
+ ["(2.5, 5]", 4, 50],
981
+ ["(0, 2.5]", 1, 60],
982
+ ["(5, 7.5]", 7, 70],
983
+ ],
984
+ columns=["range", "foo", "bar"],
985
+ )
986
+ df["range"] = Categorical(df["range"], ordered=ordered)
987
+ result = df.groupby("range", sort=sort, observed=False).first()
988
+
989
+ if sort:
990
+ data_values = [[1, 60], [5, 30], [6, 40], [10, 10]]
991
+ index_values = ["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"]
992
+ else:
993
+ data_values = [[10, 10], [5, 30], [6, 40], [1, 60]]
994
+ index_values = ["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"]
995
+ expected = DataFrame(
996
+ data_values,
997
+ columns=["foo", "bar"],
998
+ index=CategoricalIndex(index_values, name="range", ordered=ordered),
999
+ )
1000
+
1001
+ tm.assert_frame_equal(result, expected)
1002
+
1003
+
1004
+ @pytest.mark.parametrize("ordered", [True, False])
1005
+ def test_sort_datetimelike(sort, ordered):
1006
+ # GH10505
1007
+ # GH#42482 - don't sort result when sort=False, even when ordered=True
1008
+
1009
+ # use the same data as test_groupby_sort_categorical, whose categories
1010
+ # correspond to datetime.month
1011
+ df = DataFrame(
1012
+ {
1013
+ "dt": [
1014
+ datetime(2011, 7, 1),
1015
+ datetime(2011, 7, 1),
1016
+ datetime(2011, 2, 1),
1017
+ datetime(2011, 5, 1),
1018
+ datetime(2011, 2, 1),
1019
+ datetime(2011, 1, 1),
1020
+ datetime(2011, 5, 1),
1021
+ ],
1022
+ "foo": [10, 8, 5, 6, 4, 1, 7],
1023
+ "bar": [10, 20, 30, 40, 50, 60, 70],
1024
+ },
1025
+ columns=["dt", "foo", "bar"],
1026
+ )
1027
+
1028
+ # ordered=True
1029
+ df["dt"] = Categorical(df["dt"], ordered=ordered)
1030
+ if sort:
1031
+ data_values = [[1, 60], [5, 30], [6, 40], [10, 10]]
1032
+ index_values = [
1033
+ datetime(2011, 1, 1),
1034
+ datetime(2011, 2, 1),
1035
+ datetime(2011, 5, 1),
1036
+ datetime(2011, 7, 1),
1037
+ ]
1038
+ else:
1039
+ data_values = [[10, 10], [5, 30], [6, 40], [1, 60]]
1040
+ index_values = [
1041
+ datetime(2011, 7, 1),
1042
+ datetime(2011, 2, 1),
1043
+ datetime(2011, 5, 1),
1044
+ datetime(2011, 1, 1),
1045
+ ]
1046
+ expected = DataFrame(
1047
+ data_values,
1048
+ columns=["foo", "bar"],
1049
+ index=CategoricalIndex(index_values, name="dt", ordered=ordered),
1050
+ )
1051
+ result = df.groupby("dt", sort=sort, observed=False).first()
1052
+ tm.assert_frame_equal(result, expected)
1053
+
1054
+
1055
+ def test_empty_sum():
1056
+ # https://github.com/pandas-dev/pandas/issues/18678
1057
+ df = DataFrame(
1058
+ {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
1059
+ )
1060
+ expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
1061
+
1062
+ # 0 by default
1063
+ result = df.groupby("A", observed=False).B.sum()
1064
+ expected = Series([3, 1, 0], expected_idx, name="B")
1065
+ tm.assert_series_equal(result, expected)
1066
+
1067
+ # min_count=0
1068
+ result = df.groupby("A", observed=False).B.sum(min_count=0)
1069
+ expected = Series([3, 1, 0], expected_idx, name="B")
1070
+ tm.assert_series_equal(result, expected)
1071
+
1072
+ # min_count=1
1073
+ result = df.groupby("A", observed=False).B.sum(min_count=1)
1074
+ expected = Series([3, 1, np.nan], expected_idx, name="B")
1075
+ tm.assert_series_equal(result, expected)
1076
+
1077
+ # min_count>1
1078
+ result = df.groupby("A", observed=False).B.sum(min_count=2)
1079
+ expected = Series([3, np.nan, np.nan], expected_idx, name="B")
1080
+ tm.assert_series_equal(result, expected)
1081
+
1082
+
1083
+ def test_empty_prod():
1084
+ # https://github.com/pandas-dev/pandas/issues/18678
1085
+ df = DataFrame(
1086
+ {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
1087
+ )
1088
+
1089
+ expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
1090
+
1091
+ # 1 by default
1092
+ result = df.groupby("A", observed=False).B.prod()
1093
+ expected = Series([2, 1, 1], expected_idx, name="B")
1094
+ tm.assert_series_equal(result, expected)
1095
+
1096
+ # min_count=0
1097
+ result = df.groupby("A", observed=False).B.prod(min_count=0)
1098
+ expected = Series([2, 1, 1], expected_idx, name="B")
1099
+ tm.assert_series_equal(result, expected)
1100
+
1101
+ # min_count=1
1102
+ result = df.groupby("A", observed=False).B.prod(min_count=1)
1103
+ expected = Series([2, 1, np.nan], expected_idx, name="B")
1104
+ tm.assert_series_equal(result, expected)
1105
+
1106
+
1107
+ def test_groupby_multiindex_categorical_datetime():
1108
+ # https://github.com/pandas-dev/pandas/issues/21390
1109
+
1110
+ df = DataFrame(
1111
+ {
1112
+ "key1": Categorical(list("abcbabcba")),
1113
+ "key2": Categorical(
1114
+ list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
1115
+ ),
1116
+ "values": np.arange(9),
1117
+ }
1118
+ )
1119
+ result = df.groupby(["key1", "key2"]).mean()
1120
+
1121
+ idx = MultiIndex.from_product(
1122
+ [
1123
+ Categorical(["a", "b", "c"]),
1124
+ Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
1125
+ ],
1126
+ names=["key1", "key2"],
1127
+ )
1128
+ expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
1129
+ tm.assert_frame_equal(result, expected)
1130
+
1131
+
1132
+ @pytest.mark.parametrize(
1133
+ "as_index, expected",
1134
+ [
1135
+ (
1136
+ True,
1137
+ Series(
1138
+ index=MultiIndex.from_arrays(
1139
+ [Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
1140
+ ),
1141
+ data=[1, 2, 3],
1142
+ name="x",
1143
+ ),
1144
+ ),
1145
+ (
1146
+ False,
1147
+ DataFrame(
1148
+ {
1149
+ "a": Series([1, 1, 2], dtype="category"),
1150
+ "b": [1, 2, 2],
1151
+ "x": [1, 2, 3],
1152
+ }
1153
+ ),
1154
+ ),
1155
+ ],
1156
+ )
1157
+ def test_groupby_agg_observed_true_single_column(as_index, expected):
1158
+ # GH-23970
1159
+ df = DataFrame(
1160
+ {"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
1161
+ )
1162
+
1163
+ result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
1164
+
1165
+ tm.assert_equal(result, expected)
1166
+
1167
+
1168
+ @pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
1169
+ def test_shift(fill_value):
1170
+ ct = Categorical(
1171
+ ["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
1172
+ )
1173
+ expected = Categorical(
1174
+ [None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
1175
+ )
1176
+ res = ct.shift(1, fill_value=fill_value)
1177
+ tm.assert_equal(res, expected)
1178
+
1179
+
1180
+ @pytest.fixture
1181
+ def df_cat(df):
1182
+ """
1183
+ DataFrame with multiple categorical columns and a column of integers.
1184
+ Shortened so as not to contain all possible combinations of categories.
1185
+ Useful for testing `observed` kwarg functionality on GroupBy objects.
1186
+
1187
+ Parameters
1188
+ ----------
1189
+ df: DataFrame
1190
+ Non-categorical, longer DataFrame from another fixture, used to derive
1191
+ this one
1192
+
1193
+ Returns
1194
+ -------
1195
+ df_cat: DataFrame
1196
+ """
1197
+ df_cat = df.copy()[:4] # leave out some groups
1198
+ df_cat["A"] = df_cat["A"].astype("category")
1199
+ df_cat["B"] = df_cat["B"].astype("category")
1200
+ df_cat["C"] = Series([1, 2, 3, 4])
1201
+ df_cat = df_cat.drop(["D"], axis=1)
1202
+ return df_cat
1203
+
1204
+
1205
+ @pytest.mark.parametrize("operation", ["agg", "apply"])
1206
+ def test_seriesgroupby_observed_true(df_cat, operation):
1207
+ # GH#24880
1208
+ # GH#49223 - order of results was wrong when grouping by index levels
1209
+ lev_a = Index(["bar", "bar", "foo", "foo"], dtype=df_cat["A"].dtype, name="A")
1210
+ lev_b = Index(["one", "three", "one", "two"], dtype=df_cat["B"].dtype, name="B")
1211
+ index = MultiIndex.from_arrays([lev_a, lev_b])
1212
+ expected = Series(data=[2, 4, 1, 3], index=index, name="C").sort_index()
1213
+
1214
+ grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
1215
+ result = getattr(grouped, operation)(sum)
1216
+ tm.assert_series_equal(result, expected)
1217
+
1218
+
1219
+ @pytest.mark.parametrize("operation", ["agg", "apply"])
1220
+ @pytest.mark.parametrize("observed", [False, None])
1221
+ def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
1222
+ # GH 24880
1223
+ # GH#49223 - order of results was wrong when grouping by index levels
1224
+ index, _ = MultiIndex.from_product(
1225
+ [
1226
+ CategoricalIndex(["bar", "foo"], ordered=False),
1227
+ CategoricalIndex(["one", "three", "two"], ordered=False),
1228
+ ],
1229
+ names=["A", "B"],
1230
+ ).sortlevel()
1231
+
1232
+ expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
1233
+ if operation == "agg":
1234
+ expected = expected.fillna(0, downcast="infer")
1235
+ grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
1236
+ result = getattr(grouped, operation)(sum)
1237
+ tm.assert_series_equal(result, expected)
1238
+
1239
+
1240
+ @pytest.mark.parametrize(
1241
+ "observed, index, data",
1242
+ [
1243
+ (
1244
+ True,
1245
+ MultiIndex.from_arrays(
1246
+ [
1247
+ Index(["bar"] * 4 + ["foo"] * 4, dtype="category", name="A"),
1248
+ Index(
1249
+ ["one", "one", "three", "three", "one", "one", "two", "two"],
1250
+ dtype="category",
1251
+ name="B",
1252
+ ),
1253
+ Index(["min", "max"] * 4),
1254
+ ]
1255
+ ),
1256
+ [2, 2, 4, 4, 1, 1, 3, 3],
1257
+ ),
1258
+ (
1259
+ False,
1260
+ MultiIndex.from_product(
1261
+ [
1262
+ CategoricalIndex(["bar", "foo"], ordered=False),
1263
+ CategoricalIndex(["one", "three", "two"], ordered=False),
1264
+ Index(["min", "max"]),
1265
+ ],
1266
+ names=["A", "B", None],
1267
+ ),
1268
+ [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
1269
+ ),
1270
+ (
1271
+ None,
1272
+ MultiIndex.from_product(
1273
+ [
1274
+ CategoricalIndex(["bar", "foo"], ordered=False),
1275
+ CategoricalIndex(["one", "three", "two"], ordered=False),
1276
+ Index(["min", "max"]),
1277
+ ],
1278
+ names=["A", "B", None],
1279
+ ),
1280
+ [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
1281
+ ),
1282
+ ],
1283
+ )
1284
+ def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
1285
+ # GH 24880
1286
+ expected = Series(data=data, index=index, name="C")
1287
+ result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
1288
+ lambda x: {"min": x.min(), "max": x.max()}
1289
+ )
1290
+ tm.assert_series_equal(result, expected)
1291
+
1292
+
1293
+ def test_groupby_categorical_series_dataframe_consistent(df_cat):
1294
+ # GH 20416
1295
+ expected = df_cat.groupby(["A", "B"])["C"].mean()
1296
+ result = df_cat.groupby(["A", "B"]).mean()["C"]
1297
+ tm.assert_series_equal(result, expected)
1298
+
1299
+
1300
+ @pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
1301
+ def test_groupby_categorical_axis_1(code):
1302
+ # GH 13420
1303
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
1304
+ cat = Categorical.from_codes(code, categories=list("abc"))
1305
+ result = df.groupby(cat, axis=1).mean()
1306
+ expected = df.T.groupby(cat, axis=0).mean().T
1307
+ tm.assert_frame_equal(result, expected)
1308
+
1309
+
1310
+ def test_groupby_cat_preserves_structure(observed, ordered):
1311
+ # GH 28787
1312
+ df = DataFrame(
1313
+ {"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
1314
+ columns=["Name", "Item"],
1315
+ )
1316
+ expected = df.copy()
1317
+
1318
+ result = (
1319
+ df.groupby("Name", observed=observed)
1320
+ .agg(DataFrame.sum, skipna=True)
1321
+ .reset_index()
1322
+ )
1323
+
1324
+ tm.assert_frame_equal(result, expected)
1325
+
1326
+
1327
+ def test_get_nonexistent_category():
1328
+ # Accessing a Category that is not in the dataframe
1329
+ df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
1330
+ with pytest.raises(KeyError, match="'vau'"):
1331
+ df.groupby("var").apply(
1332
+ lambda rows: DataFrame(
1333
+ {"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
1334
+ )
1335
+ )
1336
+
1337
+
1338
+ def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
1339
+ # GH 17605
1340
+ if reduction_func == "ngroup":
1341
+ pytest.skip("ngroup is not truly a reduction")
1342
+
1343
+ if reduction_func == "corrwith": # GH 32293
1344
+ mark = pytest.mark.xfail(
1345
+ reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
1346
+ )
1347
+ request.node.add_marker(mark)
1348
+
1349
+ df = DataFrame(
1350
+ {
1351
+ "cat_1": Categorical(list("AABB"), categories=list("ABCD")),
1352
+ "cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
1353
+ "value": [0.1] * 4,
1354
+ }
1355
+ )
1356
+ args = get_groupby_method_args(reduction_func, df)
1357
+
1358
+ expected_length = 4 if observed else 16
1359
+
1360
+ series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
1361
+ agg = getattr(series_groupby, reduction_func)
1362
+ result = agg(*args)
1363
+
1364
+ assert len(result) == expected_length
1365
+
1366
+
1367
+ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
1368
+ reduction_func, request
1369
+ ):
1370
+ # GH 17605
1371
+ # Tests whether the unobserved categories in the result contain 0 or NaN
1372
+
1373
+ if reduction_func == "ngroup":
1374
+ pytest.skip("ngroup is not truly a reduction")
1375
+
1376
+ if reduction_func == "corrwith": # GH 32293
1377
+ mark = pytest.mark.xfail(
1378
+ reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
1379
+ )
1380
+ request.node.add_marker(mark)
1381
+
1382
+ df = DataFrame(
1383
+ {
1384
+ "cat_1": Categorical(list("AABB"), categories=list("ABC")),
1385
+ "cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
1386
+ "value": [0.1] * 4,
1387
+ }
1388
+ )
1389
+ unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
1390
+ args = get_groupby_method_args(reduction_func, df)
1391
+
1392
+ series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
1393
+ agg = getattr(series_groupby, reduction_func)
1394
+ result = agg(*args)
1395
+
1396
+ zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
1397
+
1398
+ for idx in unobserved:
1399
+ val = result.loc[idx]
1400
+ assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)
1401
+
1402
+ # If we expect unobserved values to be zero, we also expect the dtype to be int.
1403
+ # Except for .sum(). If the observed categories sum to dtype=float (i.e. their
1404
+ # sums have decimals), then the zeros for the missing categories should also be
1405
+ # floats.
1406
+ if zero_or_nan == 0 and reduction_func != "sum":
1407
+ assert np.issubdtype(result.dtype, np.integer)
1408
+
1409
+
1410
+ def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
1411
+ # GH 23865
1412
+ # GH 27075
1413
+ # Ensure that df.groupby, when 'by' is two Categorical variables,
1414
+ # does not return the categories that are not in df when observed=True
1415
+ if reduction_func == "ngroup":
1416
+ pytest.skip("ngroup does not return the Categories on the index")
1417
+
1418
+ df = DataFrame(
1419
+ {
1420
+ "cat_1": Categorical(list("AABB"), categories=list("ABC")),
1421
+ "cat_2": Categorical(list("1111"), categories=list("12")),
1422
+ "value": [0.1, 0.1, 0.1, 0.1],
1423
+ }
1424
+ )
1425
+ unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
1426
+
1427
+ df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
1428
+
1429
+ args = get_groupby_method_args(reduction_func, df)
1430
+ res = getattr(df_grp, reduction_func)(*args)
1431
+
1432
+ for cat in unobserved_cats:
1433
+ assert cat not in res.index
1434
+
1435
+
1436
+ @pytest.mark.parametrize("observed", [False, None])
1437
+ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
1438
+ reduction_func, observed
1439
+ ):
1440
+ # GH 23865
1441
+ # GH 27075
1442
+ # Ensure that df.groupby, when 'by' is two Categorical variables,
1443
+ # returns the categories that are not in df when observed=False/None
1444
+
1445
+ if reduction_func == "ngroup":
1446
+ pytest.skip("ngroup does not return the Categories on the index")
1447
+
1448
+ df = DataFrame(
1449
+ {
1450
+ "cat_1": Categorical(list("AABB"), categories=list("ABC")),
1451
+ "cat_2": Categorical(list("1111"), categories=list("12")),
1452
+ "value": [0.1, 0.1, 0.1, 0.1],
1453
+ }
1454
+ )
1455
+ unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
1456
+
1457
+ df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
1458
+
1459
+ args = get_groupby_method_args(reduction_func, df)
1460
+ res = getattr(df_grp, reduction_func)(*args)
1461
+
1462
+ expected = _results_for_groupbys_with_missing_categories[reduction_func]
1463
+
1464
+ if expected is np.nan:
1465
+ assert res.loc[unobserved_cats].isnull().all().all()
1466
+ else:
1467
+ assert (res.loc[unobserved_cats] == expected).all().all()
1468
+
1469
+
1470
+ def test_series_groupby_categorical_aggregation_getitem():
1471
+ # GH 8870
1472
+ d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
1473
+ df = DataFrame(d)
1474
+ cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
1475
+ df["range"] = cat
1476
+ groups = df.groupby(["range", "baz"], as_index=True, sort=True)
1477
+ result = groups["foo"].agg("mean")
1478
+ expected = groups.agg("mean")["foo"]
1479
+ tm.assert_series_equal(result, expected)
1480
+
1481
+
1482
+ @pytest.mark.parametrize(
1483
+ "func, expected_values",
1484
+ [(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
1485
+ )
1486
+ def test_groupby_agg_categorical_columns(func, expected_values):
1487
+ # 31256
1488
+ df = DataFrame(
1489
+ {
1490
+ "id": [0, 1, 2, 3, 4],
1491
+ "groups": [0, 1, 1, 2, 2],
1492
+ "value": Categorical([0, 0, 0, 0, 1]),
1493
+ }
1494
+ ).set_index("id")
1495
+ result = df.groupby("groups").agg(func)
1496
+
1497
+ expected = DataFrame(
1498
+ {"value": expected_values}, index=Index([0, 1, 2], name="groups")
1499
+ )
1500
+ tm.assert_frame_equal(result, expected)
1501
+
1502
+
1503
+ def test_groupby_agg_non_numeric():
1504
+ df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
1505
+ expected = DataFrame({"A": [2, 1]}, index=np.array([1, 2]))
1506
+
1507
+ result = df.groupby([1, 2, 1]).agg(Series.nunique)
1508
+ tm.assert_frame_equal(result, expected)
1509
+
1510
+ result = df.groupby([1, 2, 1]).nunique()
1511
+ tm.assert_frame_equal(result, expected)
1512
+
1513
+
1514
+ @pytest.mark.parametrize("func", ["first", "last"])
1515
+ def test_groupby_first_returned_categorical_instead_of_dataframe(func):
1516
+ # GH 28641: groupby drops index, when grouping over categorical column with
1517
+ # first/last. Renamed Categorical instead of DataFrame previously.
1518
+ df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()})
1519
+ df_grouped = df.groupby("A")["B"]
1520
+ result = getattr(df_grouped, func)()
1521
+
1522
+ # ordered categorical dtype should be preserved
1523
+ expected = Series(
1524
+ ["b"], index=Index([1997], name="A"), name="B", dtype=df["B"].dtype
1525
+ )
1526
+ tm.assert_series_equal(result, expected)
1527
+
1528
+
1529
+ def test_read_only_category_no_sort():
1530
+ # GH33410
1531
+ cats = np.array([1, 2])
1532
+ cats.flags.writeable = False
1533
+ df = DataFrame(
1534
+ {"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))}
1535
+ )
1536
+ expected = DataFrame(data={"a": [2.0, 6.0]}, index=CategoricalIndex(cats, name="b"))
1537
+ result = df.groupby("b", sort=False).mean()
1538
+ tm.assert_frame_equal(result, expected)
1539
+
1540
+
1541
+ def test_sorted_missing_category_values():
1542
+ # GH 28597
1543
+ df = DataFrame(
1544
+ {
1545
+ "foo": [
1546
+ "small",
1547
+ "large",
1548
+ "large",
1549
+ "large",
1550
+ "medium",
1551
+ "large",
1552
+ "large",
1553
+ "medium",
1554
+ ],
1555
+ "bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
1556
+ }
1557
+ )
1558
+ df["foo"] = (
1559
+ df["foo"]
1560
+ .astype("category")
1561
+ .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
1562
+ )
1563
+
1564
+ expected = DataFrame(
1565
+ {
1566
+ "tiny": {"A": 0, "C": 0},
1567
+ "small": {"A": 0, "C": 1},
1568
+ "medium": {"A": 1, "C": 1},
1569
+ "large": {"A": 3, "C": 2},
1570
+ }
1571
+ )
1572
+ expected = expected.rename_axis("bar", axis="index")
1573
+ expected.columns = CategoricalIndex(
1574
+ ["tiny", "small", "medium", "large"],
1575
+ categories=["tiny", "small", "medium", "large"],
1576
+ ordered=True,
1577
+ name="foo",
1578
+ dtype="category",
1579
+ )
1580
+
1581
+ result = df.groupby(["bar", "foo"]).size().unstack()
1582
+
1583
+ tm.assert_frame_equal(result, expected)
1584
+
1585
+
1586
+ def test_agg_cython_category_not_implemented_fallback():
1587
+ # https://github.com/pandas-dev/pandas/issues/31450
1588
+ df = DataFrame({"col_num": [1, 1, 2, 3]})
1589
+ df["col_cat"] = df["col_num"].astype("category")
1590
+
1591
+ result = df.groupby("col_num").col_cat.first()
1592
+
1593
+ # ordered categorical dtype should definitely be preserved;
1594
+ # this is unordered, so is less-clear case (if anything, it should raise)
1595
+ expected = Series(
1596
+ [1, 2, 3],
1597
+ index=Index([1, 2, 3], name="col_num"),
1598
+ name="col_cat",
1599
+ dtype=df["col_cat"].dtype,
1600
+ )
1601
+ tm.assert_series_equal(result, expected)
1602
+
1603
+ result = df.groupby("col_num").agg({"col_cat": "first"})
1604
+ expected = expected.to_frame()
1605
+ tm.assert_frame_equal(result, expected)
1606
+
1607
+
1608
+ def test_aggregate_categorical_with_isnan():
1609
+ # GH 29837
1610
+ df = DataFrame(
1611
+ {
1612
+ "A": [1, 1, 1, 1],
1613
+ "B": [1, 2, 1, 2],
1614
+ "numerical_col": [0.1, 0.2, np.nan, 0.3],
1615
+ "object_col": ["foo", "bar", "foo", "fee"],
1616
+ "categorical_col": ["foo", "bar", "foo", "fee"],
1617
+ }
1618
+ )
1619
+
1620
+ df = df.astype({"categorical_col": "category"})
1621
+
1622
+ result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
1623
+ index = MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
1624
+ expected = DataFrame(
1625
+ data={
1626
+ "numerical_col": [1, 0],
1627
+ "object_col": [0, 0],
1628
+ "categorical_col": [0, 0],
1629
+ },
1630
+ index=index,
1631
+ )
1632
+ tm.assert_frame_equal(result, expected)
1633
+
1634
+
1635
+ def test_categorical_transform():
1636
+ # GH 29037
1637
+ df = DataFrame(
1638
+ {
1639
+ "package_id": [1, 1, 1, 2, 2, 3],
1640
+ "status": [
1641
+ "Waiting",
1642
+ "OnTheWay",
1643
+ "Delivered",
1644
+ "Waiting",
1645
+ "OnTheWay",
1646
+ "Waiting",
1647
+ ],
1648
+ }
1649
+ )
1650
+
1651
+ delivery_status_type = pd.CategoricalDtype(
1652
+ categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
1653
+ )
1654
+ df["status"] = df["status"].astype(delivery_status_type)
1655
+ df["last_status"] = df.groupby("package_id")["status"].transform(max)
1656
+ result = df.copy()
1657
+
1658
+ expected = DataFrame(
1659
+ {
1660
+ "package_id": [1, 1, 1, 2, 2, 3],
1661
+ "status": [
1662
+ "Waiting",
1663
+ "OnTheWay",
1664
+ "Delivered",
1665
+ "Waiting",
1666
+ "OnTheWay",
1667
+ "Waiting",
1668
+ ],
1669
+ "last_status": [
1670
+ "Delivered",
1671
+ "Delivered",
1672
+ "Delivered",
1673
+ "OnTheWay",
1674
+ "OnTheWay",
1675
+ "Waiting",
1676
+ ],
1677
+ }
1678
+ )
1679
+
1680
+ expected["status"] = expected["status"].astype(delivery_status_type)
1681
+
1682
+ # .transform(max) should preserve ordered categoricals
1683
+ expected["last_status"] = expected["last_status"].astype(delivery_status_type)
1684
+
1685
+ tm.assert_frame_equal(result, expected)
1686
+
1687
+
1688
+ @pytest.mark.parametrize("func", ["first", "last"])
1689
+ def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
1690
+ func: str, observed: bool
1691
+ ):
1692
+ # GH 34951
1693
+ cat = Categorical([0, 0, 1, 1])
1694
+ val = [0, 1, 1, 0]
1695
+ df = DataFrame({"a": cat, "b": cat, "c": val})
1696
+
1697
+ cat2 = Categorical([0, 1])
1698
+ idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"])
1699
+ expected_dict = {
1700
+ "first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
1701
+ "last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
1702
+ }
1703
+
1704
+ expected = expected_dict[func]
1705
+ if observed:
1706
+ expected = expected.dropna().astype(np.int64)
1707
+
1708
+ srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
1709
+ result = getattr(srs_grp, func)()
1710
+ tm.assert_series_equal(result, expected)
1711
+
1712
+
1713
+ @pytest.mark.parametrize("func", ["first", "last"])
1714
+ def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
1715
+ func: str, observed: bool
1716
+ ):
1717
+ # GH 34951
1718
+ cat = Categorical([0, 0, 1, 1])
1719
+ val = [0, 1, 1, 0]
1720
+ df = DataFrame({"a": cat, "b": cat, "c": val})
1721
+
1722
+ cat2 = Categorical([0, 1])
1723
+ idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"])
1724
+ expected_dict = {
1725
+ "first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
1726
+ "last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
1727
+ }
1728
+
1729
+ expected = expected_dict[func].to_frame()
1730
+ if observed:
1731
+ expected = expected.dropna().astype(np.int64)
1732
+
1733
+ df_grp = df.groupby(["a", "b"], observed=observed)
1734
+ result = getattr(df_grp, func)()
1735
+ tm.assert_frame_equal(result, expected)
1736
+
1737
+
1738
+ def test_groupby_categorical_indices_unused_categories():
1739
+ # GH#38642
1740
+ df = DataFrame(
1741
+ {
1742
+ "key": Categorical(["b", "b", "a"], categories=["a", "b", "c"]),
1743
+ "col": range(3),
1744
+ }
1745
+ )
1746
+ grouped = df.groupby("key", sort=False)
1747
+ result = grouped.indices
1748
+ expected = {
1749
+ "b": np.array([0, 1], dtype="intp"),
1750
+ "a": np.array([2], dtype="intp"),
1751
+ "c": np.array([], dtype="intp"),
1752
+ }
1753
+ assert result.keys() == expected.keys()
1754
+ for key in result.keys():
1755
+ tm.assert_numpy_array_equal(result[key], expected[key])
1756
+
1757
+
1758
+ @pytest.mark.parametrize("func", ["first", "last"])
1759
+ def test_groupby_last_first_preserve_categoricaldtype(func):
1760
+ # GH#33090
1761
+ df = DataFrame({"a": [1, 2, 3]})
1762
+ df["b"] = df["a"].astype("category")
1763
+ result = getattr(df.groupby("a")["b"], func)()
1764
+ expected = Series(
1765
+ Categorical([1, 2, 3]), name="b", index=Index([1, 2, 3], name="a")
1766
+ )
1767
+ tm.assert_series_equal(expected, result)
1768
+
1769
+
1770
+ def test_groupby_categorical_observed_nunique():
1771
+ # GH#45128
1772
+ df = DataFrame({"a": [1, 2], "b": [1, 2], "c": [10, 11]})
1773
+ df = df.astype(dtype={"a": "category", "b": "category"})
1774
+ result = df.groupby(["a", "b"], observed=True).nunique()["c"]
1775
+ expected = Series(
1776
+ [1, 1],
1777
+ index=MultiIndex.from_arrays(
1778
+ [CategoricalIndex([1, 2], name="a"), CategoricalIndex([1, 2], name="b")]
1779
+ ),
1780
+ name="c",
1781
+ )
1782
+ tm.assert_series_equal(result, expected)
1783
+
1784
+
1785
+ def test_groupby_categorical_aggregate_functions():
1786
+ # GH#37275
1787
+ dtype = pd.CategoricalDtype(categories=["small", "big"], ordered=True)
1788
+ df = DataFrame(
1789
+ [[1, "small"], [1, "big"], [2, "small"]], columns=["grp", "description"]
1790
+ ).astype({"description": dtype})
1791
+
1792
+ result = df.groupby("grp")["description"].max()
1793
+ expected = Series(
1794
+ ["big", "small"],
1795
+ index=Index([1, 2], name="grp"),
1796
+ name="description",
1797
+ dtype=pd.CategoricalDtype(categories=["small", "big"], ordered=True),
1798
+ )
1799
+
1800
+ tm.assert_series_equal(result, expected)
1801
+
1802
+
1803
+ def test_groupby_categorical_dropna(observed, dropna):
1804
+ # GH#48645 - dropna should have no impact on the result when there are no NA values
1805
+ cat = Categorical([1, 2], categories=[1, 2, 3])
1806
+ df = DataFrame({"x": Categorical([1, 2], categories=[1, 2, 3]), "y": [3, 4]})
1807
+ gb = df.groupby("x", observed=observed, dropna=dropna)
1808
+ result = gb.sum()
1809
+
1810
+ if observed:
1811
+ expected = DataFrame({"y": [3, 4]}, index=cat)
1812
+ else:
1813
+ index = CategoricalIndex([1, 2, 3], [1, 2, 3])
1814
+ expected = DataFrame({"y": [3, 4, 0]}, index=index)
1815
+ expected.index.name = "x"
1816
+
1817
+ tm.assert_frame_equal(result, expected)
1818
+
1819
+
1820
+ @pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
1821
+ @pytest.mark.parametrize("ordered", [True, False])
1822
+ def test_category_order_reducer(
1823
+ request, as_index, sort, observed, reduction_func, index_kind, ordered
1824
+ ):
1825
+ # GH#48749
1826
+ if (
1827
+ reduction_func in ("idxmax", "idxmin")
1828
+ and not observed
1829
+ and index_kind != "multi"
1830
+ ):
1831
+ msg = "GH#10694 - idxmax/min fail with unused categories"
1832
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
1833
+ elif reduction_func == "corrwith" and not as_index:
1834
+ msg = "GH#49950 - corrwith with as_index=False may not have grouping column"
1835
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
1836
+ elif index_kind != "range" and not as_index:
1837
+ pytest.skip(reason="Result doesn't have categories, nothing to test")
1838
+ df = DataFrame(
1839
+ {
1840
+ "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
1841
+ "b": range(4),
1842
+ }
1843
+ )
1844
+ if index_kind == "range":
1845
+ keys = ["a"]
1846
+ elif index_kind == "single":
1847
+ keys = ["a"]
1848
+ df = df.set_index(keys)
1849
+ elif index_kind == "multi":
1850
+ keys = ["a", "a2"]
1851
+ df["a2"] = df["a"]
1852
+ df = df.set_index(keys)
1853
+ args = get_groupby_method_args(reduction_func, df)
1854
+ gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
1855
+ op_result = getattr(gb, reduction_func)(*args)
1856
+ if as_index:
1857
+ result = op_result.index.get_level_values("a").categories
1858
+ else:
1859
+ result = op_result["a"].cat.categories
1860
+ expected = Index([1, 4, 3, 2])
1861
+ tm.assert_index_equal(result, expected)
1862
+
1863
+ if index_kind == "multi":
1864
+ result = op_result.index.get_level_values("a2").categories
1865
+ tm.assert_index_equal(result, expected)
1866
+
1867
+
1868
+ @pytest.mark.parametrize("index_kind", ["single", "multi"])
1869
+ @pytest.mark.parametrize("ordered", [True, False])
1870
+ def test_category_order_transformer(
1871
+ as_index, sort, observed, transformation_func, index_kind, ordered
1872
+ ):
1873
+ # GH#48749
1874
+ df = DataFrame(
1875
+ {
1876
+ "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
1877
+ "b": range(4),
1878
+ }
1879
+ )
1880
+ if index_kind == "single":
1881
+ keys = ["a"]
1882
+ df = df.set_index(keys)
1883
+ elif index_kind == "multi":
1884
+ keys = ["a", "a2"]
1885
+ df["a2"] = df["a"]
1886
+ df = df.set_index(keys)
1887
+ args = get_groupby_method_args(transformation_func, df)
1888
+ gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
1889
+ op_result = getattr(gb, transformation_func)(*args)
1890
+ result = op_result.index.get_level_values("a").categories
1891
+ expected = Index([1, 4, 3, 2])
1892
+ tm.assert_index_equal(result, expected)
1893
+
1894
+ if index_kind == "multi":
1895
+ result = op_result.index.get_level_values("a2").categories
1896
+ tm.assert_index_equal(result, expected)
1897
+
1898
+
1899
+ @pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
1900
+ @pytest.mark.parametrize("method", ["head", "tail"])
1901
+ @pytest.mark.parametrize("ordered", [True, False])
1902
+ def test_category_order_head_tail(
1903
+ as_index, sort, observed, method, index_kind, ordered
1904
+ ):
1905
+ # GH#48749
1906
+ df = DataFrame(
1907
+ {
1908
+ "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
1909
+ "b": range(4),
1910
+ }
1911
+ )
1912
+ if index_kind == "range":
1913
+ keys = ["a"]
1914
+ elif index_kind == "single":
1915
+ keys = ["a"]
1916
+ df = df.set_index(keys)
1917
+ elif index_kind == "multi":
1918
+ keys = ["a", "a2"]
1919
+ df["a2"] = df["a"]
1920
+ df = df.set_index(keys)
1921
+ gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
1922
+ op_result = getattr(gb, method)()
1923
+ if index_kind == "range":
1924
+ result = op_result["a"].cat.categories
1925
+ else:
1926
+ result = op_result.index.get_level_values("a").categories
1927
+ expected = Index([1, 4, 3, 2])
1928
+ tm.assert_index_equal(result, expected)
1929
+
1930
+ if index_kind == "multi":
1931
+ result = op_result.index.get_level_values("a2").categories
1932
+ tm.assert_index_equal(result, expected)
1933
+
1934
+
1935
+ @pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
1936
+ @pytest.mark.parametrize("method", ["apply", "agg", "transform"])
1937
+ @pytest.mark.parametrize("ordered", [True, False])
1938
+ def test_category_order_apply(as_index, sort, observed, method, index_kind, ordered):
1939
+ # GH#48749
1940
+ if (method == "transform" and index_kind == "range") or (
1941
+ not as_index and index_kind != "range"
1942
+ ):
1943
+ pytest.skip("No categories in result, nothing to test")
1944
+ df = DataFrame(
1945
+ {
1946
+ "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
1947
+ "b": range(4),
1948
+ }
1949
+ )
1950
+ if index_kind == "range":
1951
+ keys = ["a"]
1952
+ elif index_kind == "single":
1953
+ keys = ["a"]
1954
+ df = df.set_index(keys)
1955
+ elif index_kind == "multi":
1956
+ keys = ["a", "a2"]
1957
+ df["a2"] = df["a"]
1958
+ df = df.set_index(keys)
1959
+ gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
1960
+ op_result = getattr(gb, method)(lambda x: x.sum(numeric_only=True))
1961
+ if (method == "transform" or not as_index) and index_kind == "range":
1962
+ result = op_result["a"].cat.categories
1963
+ else:
1964
+ result = op_result.index.get_level_values("a").categories
1965
+ expected = Index([1, 4, 3, 2])
1966
+ tm.assert_index_equal(result, expected)
1967
+
1968
+ if index_kind == "multi":
1969
+ result = op_result.index.get_level_values("a2").categories
1970
+ tm.assert_index_equal(result, expected)
1971
+
1972
+
1973
+ @pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
1974
+ def test_many_categories(as_index, sort, index_kind, ordered):
1975
+ # GH#48749 - Test when the grouper has many categories
1976
+ if index_kind != "range" and not as_index:
1977
+ pytest.skip(reason="Result doesn't have categories, nothing to test")
1978
+ categories = np.arange(9999, -1, -1)
1979
+ grouper = Categorical([2, 1, 2, 3], categories=categories, ordered=ordered)
1980
+ df = DataFrame({"a": grouper, "b": range(4)})
1981
+ if index_kind == "range":
1982
+ keys = ["a"]
1983
+ elif index_kind == "single":
1984
+ keys = ["a"]
1985
+ df = df.set_index(keys)
1986
+ elif index_kind == "multi":
1987
+ keys = ["a", "a2"]
1988
+ df["a2"] = df["a"]
1989
+ df = df.set_index(keys)
1990
+ gb = df.groupby(keys, as_index=as_index, sort=sort, observed=True)
1991
+ result = gb.sum()
1992
+
1993
+ # Test is setup so that data and index are the same values
1994
+ data = [3, 2, 1] if sort else [2, 1, 3]
1995
+
1996
+ index = CategoricalIndex(
1997
+ data, categories=grouper.categories, ordered=ordered, name="a"
1998
+ )
1999
+ if as_index:
2000
+ expected = DataFrame({"b": data})
2001
+ if index_kind == "multi":
2002
+ expected.index = MultiIndex.from_frame(DataFrame({"a": index, "a2": index}))
2003
+ else:
2004
+ expected.index = index
2005
+ elif index_kind == "multi":
2006
+ expected = DataFrame({"a": Series(index), "a2": Series(index), "b": data})
2007
+ else:
2008
+ expected = DataFrame({"a": Series(index), "b": data})
2009
+
2010
+ tm.assert_frame_equal(result, expected)
2011
+
2012
+
2013
+ @pytest.mark.parametrize("test_series", [True, False])
2014
+ @pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
2015
+ def test_agg_list(request, as_index, observed, reduction_func, test_series, keys):
2016
+ # GH#52760
2017
+ if test_series and reduction_func == "corrwith":
2018
+ assert not hasattr(SeriesGroupBy, "corrwith")
2019
+ pytest.skip("corrwith not implemented for SeriesGroupBy")
2020
+ elif reduction_func == "corrwith":
2021
+ msg = "GH#32293: attempts to call SeriesGroupBy.corrwith"
2022
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
2023
+ elif (
2024
+ reduction_func == "nunique"
2025
+ and not test_series
2026
+ and len(keys) != 1
2027
+ and not observed
2028
+ and not as_index
2029
+ ):
2030
+ msg = "GH#52848 - raises a ValueError"
2031
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
2032
+
2033
+ df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
2034
+ df = df.astype({"a1": "category", "a2": "category"})
2035
+ if "a2" not in keys:
2036
+ df = df.drop(columns="a2")
2037
+ gb = df.groupby(by=keys, as_index=as_index, observed=observed)
2038
+ if test_series:
2039
+ gb = gb["b"]
2040
+ args = get_groupby_method_args(reduction_func, df)
2041
+
2042
+ result = gb.agg([reduction_func], *args)
2043
+ expected = getattr(gb, reduction_func)(*args)
2044
+
2045
+ if as_index and (test_series or reduction_func == "size"):
2046
+ expected = expected.to_frame(reduction_func)
2047
+ if not test_series:
2048
+ if not as_index:
2049
+ # TODO: GH#52849 - as_index=False is not respected
2050
+ expected = expected.set_index(keys)
2051
+ expected.columns = MultiIndex(
2052
+ levels=[["b"], [reduction_func]], codes=[[0], [0]]
2053
+ )
2054
+ elif not as_index:
2055
+ expected.columns = keys + [reduction_func]
2056
+
2057
+ tm.assert_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_counting.py ADDED
@@ -0,0 +1,377 @@
+from itertools import product
+from string import ascii_lowercase
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+    Period,
+    Series,
+    Timedelta,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestCounting:
+    def test_cumcount(self):
+        df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"])
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series([0, 1, 2, 0, 3])
+
+        tm.assert_series_equal(expected, g.cumcount())
+        tm.assert_series_equal(expected, sg.cumcount())
+
+    def test_cumcount_empty(self):
+        ge = DataFrame().groupby(level=0)
+        se = Series(dtype=object).groupby(level=0)
+
+        # edge case, as this is usually considered float
+        e = Series(dtype="int64")
+
+        tm.assert_series_equal(e, ge.cumcount())
+        tm.assert_series_equal(e, se.cumcount())
+
+    def test_cumcount_dupe_index(self):
+        df = DataFrame(
+            [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
+        )
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
+
+        tm.assert_series_equal(expected, g.cumcount())
+        tm.assert_series_equal(expected, sg.cumcount())
+
+    def test_cumcount_mi(self):
+        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
+        df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi)
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series([0, 1, 2, 0, 3], index=mi)
+
+        tm.assert_series_equal(expected, g.cumcount())
+        tm.assert_series_equal(expected, sg.cumcount())
+
+    def test_cumcount_groupby_not_col(self):
+        df = DataFrame(
+            [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
+        )
+        g = df.groupby([0, 0, 0, 1, 0])
+        sg = g.A
+
+        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
+
+        tm.assert_series_equal(expected, g.cumcount())
+        tm.assert_series_equal(expected, sg.cumcount())
+
+    def test_ngroup(self):
+        df = DataFrame({"A": list("aaaba")})
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series([0, 0, 0, 1, 0])
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_distinct(self):
+        df = DataFrame({"A": list("abcde")})
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series(range(5), dtype="int64")
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_one_group(self):
+        df = DataFrame({"A": [0] * 5})
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series([0] * 5)
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_empty(self):
+        ge = DataFrame().groupby(level=0)
+        se = Series(dtype=object).groupby(level=0)
+
+        # edge case, as this is usually considered float
+        e = Series(dtype="int64")
+
+        tm.assert_series_equal(e, ge.ngroup())
+        tm.assert_series_equal(e, se.ngroup())
+
+    def test_ngroup_series_matches_frame(self):
+        df = DataFrame({"A": list("aaaba")})
+        s = Series(list("aaaba"))
+
+        tm.assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup())
+
+    def test_ngroup_dupe_index(self):
+        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
+        g = df.groupby("A")
+        sg = g.A
+
+        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_mi(self):
+        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
+        df = DataFrame({"A": list("aaaba")}, index=mi)
+        g = df.groupby("A")
+        sg = g.A
+        expected = Series([0, 0, 0, 1, 0], index=mi)
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_groupby_not_col(self):
+        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
+        g = df.groupby([0, 0, 0, 1, 0])
+        sg = g.A
+
+        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_descending(self):
+        df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
+        g = df.groupby(["A"])
+
+        ascending = Series([0, 0, 1, 0, 1])
+        descending = Series([1, 1, 0, 1, 0])
+
+        tm.assert_series_equal(descending, (g.ngroups - 1) - ascending)
+        tm.assert_series_equal(ascending, g.ngroup(ascending=True))
+        tm.assert_series_equal(descending, g.ngroup(ascending=False))
+
+    def test_ngroup_matches_cumcount(self):
+        # verify one manually-worked out case works
+        df = DataFrame(
+            [["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
+            columns=["A", "X"],
+        )
+        g = df.groupby(["A", "X"])
+        g_ngroup = g.ngroup()
+        g_cumcount = g.cumcount()
+        expected_ngroup = Series([0, 1, 2, 0, 3])
+        expected_cumcount = Series([0, 0, 0, 1, 0])
+
+        tm.assert_series_equal(g_ngroup, expected_ngroup)
+        tm.assert_series_equal(g_cumcount, expected_cumcount)
+
+    def test_ngroup_cumcount_pair(self):
+        # brute force comparison for all small series
+        for p in product(range(3), repeat=4):
+            df = DataFrame({"a": p})
+            g = df.groupby(["a"])
+
+            order = sorted(set(p))
+            ngroupd = [order.index(val) for val in p]
+            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
+
+            tm.assert_series_equal(g.ngroup(), Series(ngroupd))
+            tm.assert_series_equal(g.cumcount(), Series(cumcounted))
+
+    def test_ngroup_respects_groupby_order(self, sort):
+        np.random.seed(0)
+        df = DataFrame({"a": np.random.choice(list("abcdef"), 100)})
+        g = df.groupby("a", sort=sort)
+        df["group_id"] = -1
+        df["group_index"] = -1
+
+        for i, (_, group) in enumerate(g):
+            df.loc[group.index, "group_id"] = i
+            for j, ind in enumerate(group.index):
+                df.loc[ind, "group_index"] = j
+
+        tm.assert_series_equal(Series(df["group_id"].values), g.ngroup())
+        tm.assert_series_equal(Series(df["group_index"].values), g.cumcount())
+
+    @pytest.mark.parametrize(
+        "datetimelike",
+        [
+            [Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
+            [Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
+            [Timestamp(f"2016-05-{i:02d} 20:09:25", tz="UTC") for i in range(1, 4)],
+            [Timedelta(x, unit="h") for x in range(1, 4)],
+            [Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
+        ],
+    )
+    def test_count_with_datetimelike(self, datetimelike):
+        # test for #13393, where DataframeGroupBy.count() fails
+        # when counting a datetimelike column.
+
+        df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike})
+        res = df.groupby("x").count()
+        expected = DataFrame({"y": [2, 1]}, index=["a", "b"])
+        expected.index.name = "x"
+        tm.assert_frame_equal(expected, res)
+
+    def test_count_with_only_nans_in_first_group(self):
+        # GH21956
+        df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]})
+        result = df.groupby(["A", "B"]).C.count()
+        mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"])
+        expected = Series([], index=mi, dtype=np.int64, name="C")
+        tm.assert_series_equal(result, expected, check_index_type=False)
+
+    def test_count_groupby_column_with_nan_in_groupby_column(self):
+        # https://github.com/pandas-dev/pandas/issues/32841
+        df = DataFrame({"A": [1, 1, 1, 1, 1], "B": [5, 4, np.NaN, 3, 0]})
+        res = df.groupby(["B"]).count()
+        expected = DataFrame(
+            index=Index([0.0, 3.0, 4.0, 5.0], name="B"), data={"A": [1, 1, 1, 1]}
+        )
+        tm.assert_frame_equal(expected, res)
+
+    def test_groupby_count_dateparseerror(self):
+        dr = date_range(start="1/1/2012", freq="5min", periods=10)
+
+        # BAD Example, datetimes first
+        ser = Series(np.arange(10), index=[dr, np.arange(10)])
+        grouped = ser.groupby(lambda x: x[1] % 2 == 0)
+        result = grouped.count()
+
+        ser = Series(np.arange(10), index=[np.arange(10), dr])
+        grouped = ser.groupby(lambda x: x[0] % 2 == 0)
+        expected = grouped.count()
+
+        tm.assert_series_equal(result, expected)
+
+
+def test_groupby_timedelta_cython_count():
+    df = DataFrame(
+        {"g": list("ab" * 2), "delt": np.arange(4).astype("timedelta64[ns]")}
+    )
+    expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delt")
+    result = df.groupby("g").delt.count()
+    tm.assert_series_equal(expected, result)
+
+
+def test_count():
+    n = 1 << 15
+    dr = date_range("2015-08-30", periods=n // 10, freq="T")
+
+    df = DataFrame(
+        {
+            "1st": np.random.choice(list(ascii_lowercase), n),
+            "2nd": np.random.randint(0, 5, n),
+            "3rd": np.random.randn(n).round(3),
+            "4th": np.random.randint(-10, 10, n),
+            "5th": np.random.choice(dr, n),
+            "6th": np.random.randn(n).round(3),
+            "7th": np.random.randn(n).round(3),
+            "8th": np.random.choice(dr, n) - np.random.choice(dr, 1),
+            "9th": np.random.choice(list(ascii_lowercase), n),
+        }
+    )
+
+    for col in df.columns.drop(["1st", "2nd", "4th"]):
+        df.loc[np.random.choice(n, n // 10), col] = np.nan
+
+    df["9th"] = df["9th"].astype("category")
+
+    for key in ["1st", "2nd", ["1st", "2nd"]]:
+        left = df.groupby(key).count()
+        right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
+        tm.assert_frame_equal(left, right)
+
+
+def test_count_non_nulls():
+    # GH#5610
+    # count counts non-nulls
+    df = DataFrame(
+        [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]],
+        columns=["A", "B", "C"],
+    )
+
+    count_as = df.groupby("A").count()
+    count_not_as = df.groupby("A", as_index=False).count()
+
+    expected = DataFrame([[1, 2], [0, 0]], columns=["B", "C"], index=[1, 3])
+    expected.index.name = "A"
+    tm.assert_frame_equal(count_not_as, expected.reset_index())
+    tm.assert_frame_equal(count_as, expected)
+
+    count_B = df.groupby("A")["B"].count()
+    tm.assert_series_equal(count_B, expected["B"])
+
+
+def test_count_object():
+    df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
+    result = df.groupby("c").a.count()
+    expected = Series([3, 3], index=Index([2, 3], name="c"), name="a")
+    tm.assert_series_equal(result, expected)
+
+    df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
+    result = df.groupby("c").a.count()
+    expected = Series([1, 3], index=Index([2, 3], name="c"), name="a")
+    tm.assert_series_equal(result, expected)
+
+
+def test_count_cross_type():
+    # GH8169
+    vals = np.hstack(
+        (np.random.randint(0, 5, (100, 2)), np.random.randint(0, 2, (100, 2)))
+    )
+
+    df = DataFrame(vals, columns=["a", "b", "c", "d"])
+    df[df == 2] = np.nan
+    expected = df.groupby(["c", "d"]).count()
+
+    for t in ["float32", "object"]:
+        df["a"] = df["a"].astype(t)
+        df["b"] = df["b"].astype(t)
+        result = df.groupby(["c", "d"]).count()
+        tm.assert_frame_equal(result, expected)
+
+
+def test_lower_int_prec_count():
+    df = DataFrame(
+        {
+            "a": np.array([0, 1, 2, 100], np.int8),
+            "b": np.array([1, 2, 3, 6], np.uint32),
+            "c": np.array([4, 5, 6, 8], np.int16),
+            "grp": list("ab" * 2),
+        }
+    )
+    result = df.groupby("grp").count()
+    expected = DataFrame(
+        {"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp")
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_count_uses_size_on_exception():
+    class RaisingObjectException(Exception):
+        pass
+
+    class RaisingObject:
+        def __init__(self, msg="I will raise inside Cython") -> None:
+            super().__init__()
+            self.msg = msg
+
+        def __eq__(self, other):
+            # gets called in Cython to check that raising calls the method
+            raise RaisingObjectException(self.msg)
+
+    df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)})
+    result = df.groupby("grp").count()
+    expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp"))
+    tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_filters.py ADDED
@@ -0,0 +1,622 @@
1
+ from string import ascii_lowercase
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ Series,
10
+ Timestamp,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ def test_filter_series():
16
+ s = Series([1, 3, 20, 5, 22, 24, 7])
17
+ expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6])
18
+ expected_even = Series([20, 22, 24], index=[2, 4, 5])
19
+ grouper = s.apply(lambda x: x % 2)
20
+ grouped = s.groupby(grouper)
21
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
22
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
23
+ # Test dropna=False.
24
+ tm.assert_series_equal(
25
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
26
+ expected_odd.reindex(s.index),
27
+ )
28
+ tm.assert_series_equal(
29
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
30
+ expected_even.reindex(s.index),
31
+ )
32
+
33
+
34
+ def test_filter_single_column_df():
35
+ df = DataFrame([1, 3, 20, 5, 22, 24, 7])
36
+ expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
37
+ expected_even = DataFrame([20, 22, 24], index=[2, 4, 5])
38
+ grouper = df[0].apply(lambda x: x % 2)
39
+ grouped = df.groupby(grouper)
40
+ tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
41
+ tm.assert_frame_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
42
+ # Test dropna=False.
43
+ tm.assert_frame_equal(
44
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
45
+ expected_odd.reindex(df.index),
46
+ )
47
+ tm.assert_frame_equal(
48
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
49
+ expected_even.reindex(df.index),
50
+ )
51
+
52
+
53
+ def test_filter_multi_column_df():
54
+ df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]})
55
+ grouper = df["A"].apply(lambda x: x % 2)
56
+ grouped = df.groupby(grouper)
57
+ expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2])
58
+ tm.assert_frame_equal(
59
+ grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected
60
+ )
61
+
62
+
63
+ def test_filter_mixed_df():
64
+ df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
65
+ grouper = df["A"].apply(lambda x: x % 2)
66
+ grouped = df.groupby(grouper)
67
+ expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2])
68
+ tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected)
69
+
70
+
71
+ def test_filter_out_all_groups():
72
+ s = Series([1, 3, 20, 5, 22, 24, 7])
73
+ grouper = s.apply(lambda x: x % 2)
74
+ grouped = s.groupby(grouper)
75
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
76
+ df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
77
+ grouper = df["A"].apply(lambda x: x % 2)
78
+ grouped = df.groupby(grouper)
79
+ tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]])
80
+
81
+
82
+ def test_filter_out_no_groups():
83
+ s = Series([1, 3, 20, 5, 22, 24, 7])
84
+ grouper = s.apply(lambda x: x % 2)
85
+ grouped = s.groupby(grouper)
86
+ filtered = grouped.filter(lambda x: x.mean() > 0)
87
+ tm.assert_series_equal(filtered, s)
88
+ df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
89
+ grouper = df["A"].apply(lambda x: x % 2)
90
+ grouped = df.groupby(grouper)
91
+ filtered = grouped.filter(lambda x: x["A"].mean() > 0)
92
+ tm.assert_frame_equal(filtered, df)
93
+
94
+
95
+ def test_filter_out_all_groups_in_df():
96
+ # GH12768
97
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
98
+ res = df.groupby("a")
99
+ res = res.filter(lambda x: x["b"].sum() > 5, dropna=False)
100
+ expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3})
101
+ tm.assert_frame_equal(expected, res)
102
+
103
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
104
+ res = df.groupby("a")
105
+ res = res.filter(lambda x: x["b"].sum() > 5, dropna=True)
106
+ expected = DataFrame({"a": [], "b": []}, dtype="int64")
107
+ tm.assert_frame_equal(expected, res)
108
+
109
+
110
+ def test_filter_condition_raises():
111
+ def raise_if_sum_is_zero(x):
112
+ if x.sum() == 0:
113
+ raise ValueError
114
+ return x.sum() > 0
115
+
116
+ s = Series([-1, 0, 1, 2])
117
+ grouper = s.apply(lambda x: x % 2)
118
+ grouped = s.groupby(grouper)
119
+ msg = "the filter must return a boolean result"
120
+ with pytest.raises(TypeError, match=msg):
121
+ grouped.filter(raise_if_sum_is_zero)
122
+
123
+
124
+ def test_filter_with_axis_in_groupby():
125
+ # issue 11041
126
+ index = pd.MultiIndex.from_product([range(10), [0, 1]])
127
+ data = DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64")
128
+ result = data.groupby(level=0, axis=1).filter(lambda x: x.iloc[0, 0] > 10)
129
+ expected = data.iloc[:, 12:20]
130
+ tm.assert_frame_equal(result, expected)
131
+
132
+
133
+ def test_filter_bad_shapes():
134
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
135
+ s = df["B"]
136
+ g_df = df.groupby("B")
137
+ g_s = s.groupby(s)
138
+
139
+ f = lambda x: x
140
+ msg = "filter function returned a DataFrame, but expected a scalar bool"
141
+ with pytest.raises(TypeError, match=msg):
142
+ g_df.filter(f)
143
+ msg = "the filter must return a boolean result"
144
+ with pytest.raises(TypeError, match=msg):
145
+ g_s.filter(f)
146
+
147
+ f = lambda x: x == 1
148
+ msg = "filter function returned a DataFrame, but expected a scalar bool"
149
+ with pytest.raises(TypeError, match=msg):
150
+ g_df.filter(f)
151
+ msg = "the filter must return a boolean result"
152
+ with pytest.raises(TypeError, match=msg):
153
+ g_s.filter(f)
154
+
155
+ f = lambda x: np.outer(x, x)
156
+ msg = "can't multiply sequence by non-int of type 'str'"
157
+ with pytest.raises(TypeError, match=msg):
158
+ g_df.filter(f)
159
+ msg = "the filter must return a boolean result"
160
+ with pytest.raises(TypeError, match=msg):
161
+ g_s.filter(f)
162
+
163
+
164
+ def test_filter_nan_is_false():
165
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
166
+ s = df["B"]
167
+ g_df = df.groupby(df["B"])
168
+ g_s = s.groupby(s)
169
+
170
+ f = lambda x: np.nan
171
+ tm.assert_frame_equal(g_df.filter(f), df.loc[[]])
172
+ tm.assert_series_equal(g_s.filter(f), s[[]])
173
+
174
+
175
+ def test_filter_pdna_is_false():
176
+ # in particular, don't raise in filter trying to call bool(pd.NA)
177
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
178
+ ser = df["B"]
179
+ g_df = df.groupby(df["B"])
180
+ g_s = ser.groupby(ser)
181
+
182
+ func = lambda x: pd.NA
183
+ res = g_df.filter(func)
184
+ tm.assert_frame_equal(res, df.loc[[]])
185
+ res = g_s.filter(func)
186
+ tm.assert_series_equal(res, ser[[]])
187
+
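+ # Note on the two tests above: a predicate returning np.nan or pd.NA is
+ # treated as False outright; the group is filtered out without calling
+ # bool() on the result (bool(pd.NA) would raise).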
188
+
189
+ def test_filter_against_workaround():
190
+ np.random.seed(0)
191
+ # Series of ints
192
+ s = Series(np.random.randint(0, 100, 1000))
193
+ grouper = s.apply(lambda x: np.round(x, -1))
194
+ grouped = s.groupby(grouper)
195
+ f = lambda x: x.mean() > 10
196
+
197
+ old_way = s[grouped.transform(f).astype("bool")]
198
+ new_way = grouped.filter(f)
199
+ tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
200
+
201
+ # Series of floats
202
+ s = 100 * Series(np.random.random(1000))
203
+ grouper = s.apply(lambda x: np.round(x, -1))
204
+ grouped = s.groupby(grouper)
205
+ f = lambda x: x.mean() > 10
206
+ old_way = s[grouped.transform(f).astype("bool")]
207
+ new_way = grouped.filter(f)
208
+ tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
209
+
210
+ # Set up DataFrame of ints, floats, strings.
211
+ letters = np.array(list(ascii_lowercase))
212
+ N = 1000
213
+ random_letters = letters.take(np.random.randint(0, 26, N))
214
+ df = DataFrame(
215
+ {
216
+ "ints": Series(np.random.randint(0, 100, N)),
217
+ "floats": N / 10 * Series(np.random.random(N)),
218
+ "letters": Series(random_letters),
219
+ }
220
+ )
221
+
222
+ # Group by ints; filter on floats.
223
+ grouped = df.groupby("ints")
224
+ old_way = df[grouped.floats.transform(lambda x: x.mean() > N / 20).astype("bool")]
225
+ new_way = grouped.filter(lambda x: x["floats"].mean() > N / 20)
226
+ tm.assert_frame_equal(new_way, old_way)
227
+
228
+ # Group by floats (rounded); filter on strings.
229
+ grouper = df.floats.apply(lambda x: np.round(x, -1))
230
+ grouped = df.groupby(grouper)
231
+ old_way = df[grouped.letters.transform(lambda x: len(x) < N / 10).astype("bool")]
232
+ new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
233
+ tm.assert_frame_equal(new_way, old_way)
234
+
235
+ # Group by strings; filter on ints.
236
+ grouped = df.groupby("letters")
237
+ old_way = df[grouped.ints.transform(lambda x: x.mean() > N / 20).astype("bool")]
238
+ new_way = grouped.filter(lambda x: x["ints"].mean() > N / 20)
239
+ tm.assert_frame_equal(new_way, old_way)
240
+
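+ # The equivalence exercised above, as a sketch: for a scalar boolean
+ # predicate f, grouped.filter(f) should select the same rows as the mask
+ # workaround obj[grouped.transform(f).astype("bool")], up to row ordering.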
241
+
242
+ def test_filter_using_len():
243
+ # BUG GH4447
244
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
245
+ grouped = df.groupby("B")
246
+ actual = grouped.filter(lambda x: len(x) > 2)
247
+ expected = DataFrame(
248
+ {"A": np.arange(2, 6), "B": list("bbbb"), "C": np.arange(2, 6)},
249
+ index=np.arange(2, 6, dtype=np.int64),
250
+ )
251
+ tm.assert_frame_equal(actual, expected)
252
+
253
+ actual = grouped.filter(lambda x: len(x) > 4)
254
+ expected = df.loc[[]]
255
+ tm.assert_frame_equal(actual, expected)
256
+
257
+ # Series have always worked properly, but we'll test anyway.
258
+ s = df["B"]
259
+ grouped = s.groupby(s)
260
+ actual = grouped.filter(lambda x: len(x) > 2)
261
+ expected = Series(4 * ["b"], index=np.arange(2, 6, dtype=np.int64), name="B")
262
+ tm.assert_series_equal(actual, expected)
263
+
264
+ actual = grouped.filter(lambda x: len(x) > 4)
265
+ expected = s[[]]
266
+ tm.assert_series_equal(actual, expected)
267
+
268
+
269
+ def test_filter_maintains_ordering():
270
+ # Simple case: index is sequential. #4621
271
+ df = DataFrame(
272
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}
273
+ )
274
+ s = df["pid"]
275
+ grouped = df.groupby("tag")
276
+ actual = grouped.filter(lambda x: len(x) > 1)
277
+ expected = df.iloc[[1, 2, 4, 7]]
278
+ tm.assert_frame_equal(actual, expected)
279
+
280
+ grouped = s.groupby(df["tag"])
281
+ actual = grouped.filter(lambda x: len(x) > 1)
282
+ expected = s.iloc[[1, 2, 4, 7]]
283
+ tm.assert_series_equal(actual, expected)
284
+
285
+ # Now index is sequentially decreasing.
286
+ df.index = np.arange(len(df) - 1, -1, -1)
287
+ s = df["pid"]
288
+ grouped = df.groupby("tag")
289
+ actual = grouped.filter(lambda x: len(x) > 1)
290
+ expected = df.iloc[[1, 2, 4, 7]]
291
+ tm.assert_frame_equal(actual, expected)
292
+
293
+ grouped = s.groupby(df["tag"])
294
+ actual = grouped.filter(lambda x: len(x) > 1)
295
+ expected = s.iloc[[1, 2, 4, 7]]
296
+ tm.assert_series_equal(actual, expected)
297
+
298
+ # Index is shuffled.
299
+ SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
300
+ df.index = df.index[SHUFFLED]
301
+ s = df["pid"]
302
+ grouped = df.groupby("tag")
303
+ actual = grouped.filter(lambda x: len(x) > 1)
304
+ expected = df.iloc[[1, 2, 4, 7]]
305
+ tm.assert_frame_equal(actual, expected)
306
+
307
+ grouped = s.groupby(df["tag"])
308
+ actual = grouped.filter(lambda x: len(x) > 1)
309
+ expected = s.iloc[[1, 2, 4, 7]]
310
+ tm.assert_series_equal(actual, expected)
311
+
312
+
313
+ def test_filter_multiple_timestamp():
314
+ # GH 10114
315
+ df = DataFrame(
316
+ {
317
+ "A": np.arange(5, dtype="int64"),
318
+ "B": ["foo", "bar", "foo", "bar", "bar"],
319
+ "C": Timestamp("20130101"),
320
+ }
321
+ )
322
+
323
+ grouped = df.groupby(["B", "C"])
324
+
325
+ result = grouped["A"].filter(lambda x: True)
326
+ tm.assert_series_equal(df["A"], result)
327
+
328
+ result = grouped["A"].transform(len)
329
+ expected = Series([2, 3, 2, 3, 3], name="A")
330
+ tm.assert_series_equal(result, expected)
331
+
332
+ result = grouped.filter(lambda x: True)
333
+ tm.assert_frame_equal(df, result)
334
+
335
+ result = grouped.transform("sum")
336
+ expected = DataFrame({"A": [2, 8, 2, 8, 8]})
337
+ tm.assert_frame_equal(result, expected)
338
+
339
+ result = grouped.transform(len)
340
+ expected = DataFrame({"A": [2, 3, 2, 3, 3]})
341
+ tm.assert_frame_equal(result, expected)
342
+
343
+
344
+ def test_filter_and_transform_with_non_unique_int_index():
345
+ # GH4620
346
+ index = [1, 1, 1, 2, 1, 1, 0, 1]
347
+ df = DataFrame(
348
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
349
+ index=index,
350
+ )
351
+ grouped_df = df.groupby("tag")
352
+ ser = df["pid"]
353
+ grouped_ser = ser.groupby(df["tag"])
354
+ expected_indexes = [1, 2, 4, 7]
355
+
356
+ # Filter DataFrame
357
+ actual = grouped_df.filter(lambda x: len(x) > 1)
358
+ expected = df.iloc[expected_indexes]
359
+ tm.assert_frame_equal(actual, expected)
360
+
361
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
362
+ expected = df.copy()
363
+ expected.iloc[[0, 3, 5, 6]] = np.nan
364
+ tm.assert_frame_equal(actual, expected)
365
+
366
+ # Filter Series
367
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
368
+ expected = ser.take(expected_indexes)
369
+ tm.assert_series_equal(actual, expected)
370
+
371
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
372
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
373
+ # ^ made manually because this can get confusing!
374
+ tm.assert_series_equal(actual, expected)
375
+
376
+ # Transform Series
377
+ actual = grouped_ser.transform(len)
378
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
379
+ tm.assert_series_equal(actual, expected)
380
+
381
+ # Transform (a column from) DataFrameGroupBy
382
+ actual = grouped_df.pid.transform(len)
383
+ tm.assert_series_equal(actual, expected)
384
+
385
+
386
+ def test_filter_and_transform_with_multiple_non_unique_int_index():
387
+ # GH4620
388
+ index = [1, 1, 1, 2, 0, 0, 0, 1]
389
+ df = DataFrame(
390
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
391
+ index=index,
392
+ )
393
+ grouped_df = df.groupby("tag")
394
+ ser = df["pid"]
395
+ grouped_ser = ser.groupby(df["tag"])
396
+ expected_indexes = [1, 2, 4, 7]
397
+
398
+ # Filter DataFrame
399
+ actual = grouped_df.filter(lambda x: len(x) > 1)
400
+ expected = df.iloc[expected_indexes]
401
+ tm.assert_frame_equal(actual, expected)
402
+
403
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
404
+ expected = df.copy()
405
+ expected.iloc[[0, 3, 5, 6]] = np.nan
406
+ tm.assert_frame_equal(actual, expected)
407
+
408
+ # Filter Series
409
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
410
+ expected = ser.take(expected_indexes)
411
+ tm.assert_series_equal(actual, expected)
412
+
413
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
414
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
415
+ # ^ made manually because this can get confusing!
416
+ tm.assert_series_equal(actual, expected)
417
+
418
+ # Transform Series
419
+ actual = grouped_ser.transform(len)
420
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
421
+ tm.assert_series_equal(actual, expected)
422
+
423
+ # Transform (a column from) DataFrameGroupBy
424
+ actual = grouped_df.pid.transform(len)
425
+ tm.assert_series_equal(actual, expected)
426
+
427
+
428
+ def test_filter_and_transform_with_non_unique_float_index():
429
+ # GH4620
430
+ index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
431
+ df = DataFrame(
432
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
433
+ index=index,
434
+ )
435
+ grouped_df = df.groupby("tag")
436
+ ser = df["pid"]
437
+ grouped_ser = ser.groupby(df["tag"])
438
+ expected_indexes = [1, 2, 4, 7]
439
+
440
+ # Filter DataFrame
441
+ actual = grouped_df.filter(lambda x: len(x) > 1)
442
+ expected = df.iloc[expected_indexes]
443
+ tm.assert_frame_equal(actual, expected)
444
+
445
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
446
+ expected = df.copy()
447
+ expected.iloc[[0, 3, 5, 6]] = np.nan
448
+ tm.assert_frame_equal(actual, expected)
449
+
450
+ # Filter Series
451
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
452
+ expected = ser.take(expected_indexes)
453
+ tm.assert_series_equal(actual, expected)
454
+
455
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
456
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
457
+ # ^ made manually because this can get confusing!
458
+ tm.assert_series_equal(actual, expected)
459
+
460
+ # Transform Series
461
+ actual = grouped_ser.transform(len)
462
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
463
+ tm.assert_series_equal(actual, expected)
464
+
465
+ # Transform (a column from) DataFrameGroupBy
466
+ actual = grouped_df.pid.transform(len)
467
+ tm.assert_series_equal(actual, expected)
468
+
469
+
470
+ def test_filter_and_transform_with_non_unique_timestamp_index():
471
+ # GH4620
472
+ t0 = Timestamp("2013-09-30 00:05:00")
473
+ t1 = Timestamp("2013-10-30 00:05:00")
474
+ t2 = Timestamp("2013-11-30 00:05:00")
475
+ index = [t1, t1, t1, t2, t1, t1, t0, t1]
476
+ df = DataFrame(
477
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
478
+ index=index,
479
+ )
480
+ grouped_df = df.groupby("tag")
481
+ ser = df["pid"]
482
+ grouped_ser = ser.groupby(df["tag"])
483
+ expected_indexes = [1, 2, 4, 7]
484
+
485
+ # Filter DataFrame
486
+ actual = grouped_df.filter(lambda x: len(x) > 1)
487
+ expected = df.iloc[expected_indexes]
488
+ tm.assert_frame_equal(actual, expected)
489
+
490
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
491
+ expected = df.copy()
492
+ expected.iloc[[0, 3, 5, 6]] = np.nan
493
+ tm.assert_frame_equal(actual, expected)
494
+
495
+ # Filter Series
496
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
497
+ expected = ser.take(expected_indexes)
498
+ tm.assert_series_equal(actual, expected)
499
+
500
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
501
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
502
+ # ^ made manually because this can get confusing!
503
+ tm.assert_series_equal(actual, expected)
504
+
505
+ # Transform Series
506
+ actual = grouped_ser.transform(len)
507
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
508
+ tm.assert_series_equal(actual, expected)
509
+
510
+ # Transform (a column from) DataFrameGroupBy
511
+ actual = grouped_df.pid.transform(len)
512
+ tm.assert_series_equal(actual, expected)
513
+
514
+
515
+ def test_filter_and_transform_with_non_unique_string_index():
516
+ # GH4620
517
+ index = list("bbbcbbab")
518
+ df = DataFrame(
519
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
520
+ index=index,
521
+ )
522
+ grouped_df = df.groupby("tag")
523
+ ser = df["pid"]
524
+ grouped_ser = ser.groupby(df["tag"])
525
+ expected_indexes = [1, 2, 4, 7]
526
+
527
+ # Filter DataFrame
528
+ actual = grouped_df.filter(lambda x: len(x) > 1)
529
+ expected = df.iloc[expected_indexes]
530
+ tm.assert_frame_equal(actual, expected)
531
+
532
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
533
+ expected = df.copy()
534
+ expected.iloc[[0, 3, 5, 6]] = np.nan
535
+ tm.assert_frame_equal(actual, expected)
536
+
537
+ # Filter Series
538
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
539
+ expected = ser.take(expected_indexes)
540
+ tm.assert_series_equal(actual, expected)
541
+
542
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
543
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
544
+ # ^ made manually because this can get confusing!
545
+ tm.assert_series_equal(actual, expected)
546
+
547
+ # Transform Series
548
+ actual = grouped_ser.transform(len)
549
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
550
+ tm.assert_series_equal(actual, expected)
551
+
552
+ # Transform (a column from) DataFrameGroupBy
553
+ actual = grouped_df.pid.transform(len)
554
+ tm.assert_series_equal(actual, expected)
555
+
556
+
557
+ def test_filter_has_access_to_grouped_cols():
558
+ df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=["A", "B"])
559
+ g = df.groupby("A")
560
+ # previously didn't have access to col A #????
561
+ filt = g.filter(lambda x: x["A"].sum() == 2)
562
+ tm.assert_frame_equal(filt, df.iloc[[0, 1]])
563
+
564
+
565
+ def test_filter_enforces_scalarness():
566
+ df = DataFrame(
567
+ [
568
+ ["best", "a", "x"],
569
+ ["worst", "b", "y"],
570
+ ["best", "c", "x"],
571
+ ["best", "d", "y"],
572
+ ["worst", "d", "y"],
573
+ ["worst", "d", "y"],
574
+ ["best", "d", "z"],
575
+ ],
576
+ columns=["a", "b", "c"],
577
+ )
578
+ with pytest.raises(TypeError, match="filter function returned a.*"):
579
+ df.groupby("c").filter(lambda g: g["a"] == "best")
580
+
581
+
582
+ def test_filter_non_bool_raises():
583
+ df = DataFrame(
584
+ [
585
+ ["best", "a", 1],
586
+ ["worst", "b", 1],
587
+ ["best", "c", 1],
588
+ ["best", "d", 1],
589
+ ["worst", "d", 1],
590
+ ["worst", "d", 1],
591
+ ["best", "d", 1],
592
+ ],
593
+ columns=["a", "b", "c"],
594
+ )
595
+ with pytest.raises(TypeError, match="filter function returned a.*"):
596
+ df.groupby("a").filter(lambda g: g.c.mean())
597
+
598
+
599
+ def test_filter_dropna_with_empty_groups():
600
+ # GH 10780
601
+ data = Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
602
+ grouped = data.groupby(level=0)
603
+ result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False)
604
+ expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))
605
+ tm.assert_series_equal(result_false, expected_false)
606
+
607
+ result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True)
608
+ expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64)
609
+ tm.assert_series_equal(result_true, expected_true)
610
+
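+ # Note: the all-filtered dropna=True result above preserves the float64
+ # data dtype and an empty int index, per GH 10780.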
611
+
612
+ def test_filter_consistent_result_before_after_agg_func():
613
+ # GH 17091
614
+ df = DataFrame({"data": range(6), "key": list("ABCABC")})
615
+ grouper = df.groupby("key")
616
+ result = grouper.filter(lambda x: True)
617
+ expected = DataFrame({"data": range(6), "key": list("ABCABC")})
618
+ tm.assert_frame_equal(result, expected)
619
+
620
+ grouper.sum()
621
+ result = grouper.filter(lambda x: True)
622
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_function.py ADDED
@@ -0,0 +1,1637 @@
1
+ import builtins
2
+ from io import StringIO
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas._libs import lib
8
+ from pandas.errors import UnsupportedFunctionCall
9
+
10
+ import pandas as pd
11
+ from pandas import (
12
+ DataFrame,
13
+ Index,
14
+ MultiIndex,
15
+ Series,
16
+ Timestamp,
17
+ date_range,
18
+ )
19
+ import pandas._testing as tm
20
+ from pandas.core import nanops
21
+ from pandas.tests.groupby import get_groupby_method_args
22
+ from pandas.util import _test_decorators as td
23
+
24
+
25
+ @pytest.fixture(
26
+ params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"],
27
+ ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"],
28
+ )
29
+ def dtypes_for_minmax(request):
30
+ """
31
+ Fixture of dtypes with min and max values used for testing
32
+ cummin and cummax
33
+ """
34
+ dtype = request.param
35
+
36
+ np_type = dtype
37
+ if dtype == "Int64":
38
+ np_type = np.int64
39
+ elif dtype == "Float64":
40
+ np_type = np.float64
41
+
42
+ min_val = (
43
+ np.iinfo(np_type).min
44
+ if np.dtype(np_type).kind == "i"
45
+ else np.finfo(np_type).min
46
+ )
47
+ max_val = (
48
+ np.iinfo(np_type).max
49
+ if np.dtype(np_type).kind == "i"
50
+ else np.finfo(np_type).max
51
+ )
52
+
53
+ return (dtype, min_val, max_val)
54
+
55
+
56
+ def test_intercept_builtin_sum():
57
+ s = Series([1.0, 2.0, np.nan, 3.0])
58
+ grouped = s.groupby([0, 1, 2, 2])
59
+
60
+ result = grouped.agg(builtins.sum)
61
+ result2 = grouped.apply(builtins.sum)
62
+ expected = grouped.sum()
63
+ tm.assert_series_equal(result, expected)
64
+ tm.assert_series_equal(result2, expected)
65
+
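+ # Note: groupby recognizes builtins.sum and dispatches it like
+ # grouped.sum(), so the NaN is skipped here; a plain Python sum over the
+ # raw values would propagate it.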
66
+
67
+ @pytest.mark.parametrize("f", [max, min, sum])
68
+ @pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
69
+ def test_builtins_apply(keys, f):
70
+ # see gh-8155
71
+ df = DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
72
+ df["jolie"] = np.random.randn(1000)
73
+
74
+ gb = df.groupby(keys)
75
+
76
+ fname = f.__name__
77
+ result = gb.apply(f)
78
+ ngroups = len(df.drop_duplicates(subset=keys))
79
+
80
+ assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
81
+ assert result.shape == (ngroups, 3), assert_msg
82
+
83
+ npfunc = lambda x: getattr(np, fname)(x, axis=0) # numpy's equivalent function
84
+ expected = gb.apply(npfunc)
85
+ tm.assert_frame_equal(result, expected)
86
+
87
+ with tm.assert_produces_warning(None):
88
+ expected2 = gb.apply(lambda x: npfunc(x))
89
+ tm.assert_frame_equal(result, expected2)
90
+
91
+ if f != sum:
92
+ expected = gb.agg(fname).reset_index()
93
+ expected.set_index(keys, inplace=True, drop=False)
94
+ tm.assert_frame_equal(result, expected, check_dtype=False)
95
+
96
+ tm.assert_series_equal(getattr(result, fname)(axis=0), getattr(df, fname)(axis=0))
97
+
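+ # Note: the test above relies on gb.apply(max/min/sum) matching the numpy
+ # equivalents applied per group, and (except for sum) the named cython
+ # aggregation as well.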
98
+
99
+ class TestNumericOnly:
100
+ # make sure that we are passing thru kwargs to our agg functions
101
+
102
+ @pytest.fixture
103
+ def df(self):
104
+ # GH3668
105
+ # GH5724
106
+ df = DataFrame(
107
+ {
108
+ "group": [1, 1, 2],
109
+ "int": [1, 2, 3],
110
+ "float": [4.0, 5.0, 6.0],
111
+ "string": list("abc"),
112
+ "category_string": Series(list("abc")).astype("category"),
113
+ "category_int": [7, 8, 9],
114
+ "datetime": date_range("20130101", periods=3),
115
+ "datetimetz": date_range("20130101", periods=3, tz="US/Eastern"),
116
+ "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
117
+ },
118
+ columns=[
119
+ "group",
120
+ "int",
121
+ "float",
122
+ "string",
123
+ "category_string",
124
+ "category_int",
125
+ "datetime",
126
+ "datetimetz",
127
+ "timedelta",
128
+ ],
129
+ )
130
+ return df
131
+
132
+ @pytest.mark.parametrize("method", ["mean", "median"])
133
+ def test_averages(self, df, method):
134
+ # mean / median
135
+ expected_columns_numeric = Index(["int", "float", "category_int"])
136
+
137
+ gb = df.groupby("group")
138
+ expected = DataFrame(
139
+ {
140
+ "category_int": [7.5, 9],
141
+ "float": [4.5, 6.0],
142
+ "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
143
+ "int": [1.5, 3],
144
+ "datetime": [
145
+ Timestamp("2013-01-01 12:00:00"),
146
+ Timestamp("2013-01-03 00:00:00"),
147
+ ],
148
+ "datetimetz": [
149
+ Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
150
+ Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
151
+ ],
152
+ },
153
+ index=Index([1, 2], name="group"),
154
+ columns=[
155
+ "int",
156
+ "float",
157
+ "category_int",
158
+ ],
159
+ )
160
+
161
+ result = getattr(gb, method)(numeric_only=True)
162
+ tm.assert_frame_equal(result.reindex_like(expected), expected)
163
+
164
+ expected_columns = expected.columns
165
+
166
+ self._check(df, method, expected_columns, expected_columns_numeric)
167
+
168
+ @pytest.mark.parametrize("method", ["min", "max"])
169
+ def test_extrema(self, df, method):
170
+ # TODO: min, max *should* handle
171
+ # categorical (ordered) dtype
172
+
173
+ expected_columns = Index(
174
+ [
175
+ "int",
176
+ "float",
177
+ "string",
178
+ "category_int",
179
+ "datetime",
180
+ "datetimetz",
181
+ "timedelta",
182
+ ]
183
+ )
184
+ expected_columns_numeric = expected_columns
185
+
186
+ self._check(df, method, expected_columns, expected_columns_numeric)
187
+
188
+ @pytest.mark.parametrize("method", ["first", "last"])
189
+ def test_first_last(self, df, method):
190
+ expected_columns = Index(
191
+ [
192
+ "int",
193
+ "float",
194
+ "string",
195
+ "category_string",
196
+ "category_int",
197
+ "datetime",
198
+ "datetimetz",
199
+ "timedelta",
200
+ ]
201
+ )
202
+ expected_columns_numeric = expected_columns
203
+
204
+ self._check(df, method, expected_columns, expected_columns_numeric)
205
+
206
+ @pytest.mark.parametrize("method", ["sum", "cumsum"])
207
+ def test_sum_cumsum(self, df, method):
208
+ expected_columns_numeric = Index(["int", "float", "category_int"])
209
+ expected_columns = Index(
210
+ ["int", "float", "string", "category_int", "timedelta"]
211
+ )
212
+ if method == "cumsum":
213
+ # cumsum loses string
214
+ expected_columns = Index(["int", "float", "category_int", "timedelta"])
215
+
216
+ self._check(df, method, expected_columns, expected_columns_numeric)
217
+
218
+ @pytest.mark.parametrize("method", ["prod", "cumprod"])
219
+ def test_prod_cumprod(self, df, method):
220
+ expected_columns = Index(["int", "float", "category_int"])
221
+ expected_columns_numeric = expected_columns
222
+
223
+ self._check(df, method, expected_columns, expected_columns_numeric)
224
+
225
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
226
+ def test_cummin_cummax(self, df, method):
227
+ # like min, max, but don't include strings
228
+ expected_columns = Index(
229
+ ["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
230
+ )
231
+
232
+ # GH#15561: numeric_only=False set by default like min/max
233
+ expected_columns_numeric = expected_columns
234
+
235
+ self._check(df, method, expected_columns, expected_columns_numeric)
236
+
237
+ def _check(self, df, method, expected_columns, expected_columns_numeric):
238
+ gb = df.groupby("group")
239
+
240
+ # object dtypes for transformations are not implemented in Cython and
241
+ # have no Python fallback
242
+ exception = NotImplementedError if method.startswith("cum") else TypeError
243
+
244
+ if method in ("min", "max", "cummin", "cummax", "cumsum", "cumprod"):
245
+ # The methods default to numeric_only=False and raise TypeError
246
+ msg = "|".join(
247
+ [
248
+ "Categorical is not ordered",
249
+ "function is not implemented for this dtype",
250
+ f"Cannot perform {method} with non-ordered Categorical",
251
+ ]
252
+ )
253
+ with pytest.raises(exception, match=msg):
254
+ getattr(gb, method)()
255
+ elif method in ("sum", "mean", "median", "prod"):
256
+ msg = "|".join(
257
+ [
258
+ "category type does not support sum operations",
259
+ "[Cc]ould not convert",
260
+ "can't multiply sequence by non-int of type 'str'",
261
+ ]
262
+ )
263
+ with pytest.raises(exception, match=msg):
264
+ getattr(gb, method)()
265
+ else:
266
+ result = getattr(gb, method)()
267
+ tm.assert_index_equal(result.columns, expected_columns_numeric)
268
+
269
+ if method not in ("first", "last"):
270
+ msg = "|".join(
271
+ [
272
+ "[Cc]ould not convert",
273
+ "Categorical is not ordered",
274
+ "category type does not support",
275
+ "can't multiply sequence",
276
+ "function is not implemented for this dtype",
277
+ f"Cannot perform {method} with non-ordered Categorical",
278
+ ]
279
+ )
280
+ with pytest.raises(exception, match=msg):
281
+ getattr(gb, method)(numeric_only=False)
282
+ else:
283
+ result = getattr(gb, method)(numeric_only=False)
284
+ tm.assert_index_equal(result.columns, expected_columns)
285
+
286
+
287
+ class TestGroupByNonCythonPaths:
288
+ # GH#5610 non-cython calls should not include the grouper
289
+ # Tests for code not expected to go through cython paths.
290
+
291
+ @pytest.fixture
292
+ def df(self):
293
+ df = DataFrame(
294
+ [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
295
+ columns=["A", "B", "C"],
296
+ )
297
+ return df
298
+
299
+ @pytest.fixture
300
+ def gb(self, df):
301
+ gb = df.groupby("A")
302
+ return gb
303
+
304
+ @pytest.fixture
305
+ def gni(self, df):
306
+ gni = df.groupby("A", as_index=False)
307
+ return gni
308
+
309
+ def test_describe(self, df, gb, gni):
310
+ # describe
311
+ expected_index = Index([1, 3], name="A")
312
+ expected_col = MultiIndex(
313
+ levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
314
+ codes=[[0] * 8, list(range(8))],
315
+ )
316
+ expected = DataFrame(
317
+ [
318
+ [1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
319
+ [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
320
+ ],
321
+ index=expected_index,
322
+ columns=expected_col,
323
+ )
324
+ result = gb.describe()
325
+ tm.assert_frame_equal(result, expected)
326
+
327
+ expected = expected.reset_index()
328
+ result = gni.describe()
329
+ tm.assert_frame_equal(result, expected)
330
+
331
+
332
+ def test_cython_api2():
333
+ # this takes the fast apply path
334
+
335
+ # cumsum (GH5614)
336
+ df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
337
+ expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
338
+ result = df.groupby("A").cumsum()
339
+ tm.assert_frame_equal(result, expected)
340
+
341
+ # GH 5755 - cumsum is a transformer and should ignore as_index
342
+ result = df.groupby("A", as_index=False).cumsum()
343
+ tm.assert_frame_equal(result, expected)
344
+
345
+ # GH 13994
346
+ result = df.groupby("A").cumsum(axis=1)
347
+ expected = df.cumsum(axis=1)
348
+ tm.assert_frame_equal(result, expected)
349
+ result = df.groupby("A").cumprod(axis=1)
350
+ expected = df.cumprod(axis=1)
351
+ tm.assert_frame_equal(result, expected)
352
+
353
+
354
+ def test_cython_median():
355
+ arr = np.random.randn(1000)
356
+ arr[::2] = np.nan
357
+ df = DataFrame(arr)
358
+
359
+ labels = np.random.randint(0, 50, size=1000).astype(float)
360
+ labels[::17] = np.nan
361
+
362
+ result = df.groupby(labels).median()
363
+ exp = df.groupby(labels).agg(nanops.nanmedian)
364
+ tm.assert_frame_equal(result, exp)
365
+
366
+ df = DataFrame(np.random.randn(1000, 5))
367
+ rs = df.groupby(labels).agg(np.median)
368
+ xp = df.groupby(labels).median()
369
+ tm.assert_frame_equal(rs, xp)
370
+
371
+
372
+ def test_median_empty_bins(observed):
373
+ df = DataFrame(np.random.randint(0, 44, 500))
374
+
375
+ grps = range(0, 55, 5)
376
+ bins = pd.cut(df[0], grps)
377
+
378
+ result = df.groupby(bins, observed=observed).median()
379
+ expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
380
+ tm.assert_frame_equal(result, expected)
381
+
382
+
383
+ @pytest.mark.parametrize(
384
+ "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
385
+ )
386
+ @pytest.mark.parametrize(
387
+ "method,data",
388
+ [
389
+ ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
390
+ ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
391
+ ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
392
+ ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
393
+ ("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
394
+ ],
395
+ )
396
+ def test_groupby_non_arithmetic_agg_types(dtype, method, data):
397
+ # GH9311, GH6620
398
+ df = DataFrame(
399
+ [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
400
+ )
401
+
402
+ df["b"] = df.b.astype(dtype)
403
+
404
+ if "args" not in data:
405
+ data["args"] = []
406
+
407
+ if "out_type" in data:
408
+ out_type = data["out_type"]
409
+ else:
410
+ out_type = dtype
411
+
412
+ exp = data["df"]
413
+ df_out = DataFrame(exp)
414
+
415
+ df_out["b"] = df_out.b.astype(out_type)
416
+ df_out.set_index("a", inplace=True)
417
+
418
+ grpd = df.groupby("a")
419
+ t = getattr(grpd, method)(*data["args"])
420
+ tm.assert_frame_equal(t, df_out)
421
+
422
+
423
+ @pytest.mark.parametrize(
424
+ "i",
425
+ [
426
+ (
427
+ Timestamp("2011-01-15 12:50:28.502376"),
428
+ Timestamp("2011-01-20 12:50:28.593448"),
429
+ ),
430
+ (24650000000000001, 24650000000000002),
431
+ ],
432
+ )
433
+ def test_groupby_non_arithmetic_agg_int_like_precision(i):
434
+ # see gh-6620, gh-9311
435
+ df = DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
436
+
437
+ grp_exp = {
438
+ "first": {"expected": i[0]},
439
+ "last": {"expected": i[1]},
440
+ "min": {"expected": i[0]},
441
+ "max": {"expected": i[1]},
442
+ "nth": {"expected": i[1], "args": [1]},
443
+ "count": {"expected": 2},
444
+ }
445
+
446
+ for method, data in grp_exp.items():
447
+ if "args" not in data:
448
+ data["args"] = []
449
+
450
+ grouped = df.groupby("a")
451
+ res = getattr(grouped, method)(*data["args"])
452
+
453
+ assert res.iloc[0].b == data["expected"]
454
+
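+ # Why these values: 24650000000000001 and 24650000000000002 differ only
+ # below float64 precision (integers are exact in float64 only up to
+ # 2**53 = 9007199254740992), so any lossy cast to float would make the
+ # two rows indistinguishable.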
455
+
456
+ @pytest.mark.parametrize(
457
+ "func, values",
458
+ [
459
+ ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
460
+ ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
461
+ ],
462
+ )
463
+ @pytest.mark.parametrize("numeric_only", [True, False])
464
+ def test_idxmin_idxmax_returns_int_types(func, values, numeric_only):
465
+ # GH 25444
466
+ df = DataFrame(
467
+ {
468
+ "name": ["A", "A", "B", "B"],
469
+ "c_int": [1, 2, 3, 4],
470
+ "c_float": [4.02, 3.03, 2.04, 1.05],
471
+ "c_date": ["2019", "2018", "2016", "2017"],
472
+ }
473
+ )
474
+ df["c_date"] = pd.to_datetime(df["c_date"])
475
+ df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific")
476
+ df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0]
477
+ df["c_period"] = df["c_date"].dt.to_period("W")
478
+ df["c_Integer"] = df["c_int"].astype("Int64")
479
+ df["c_Floating"] = df["c_float"].astype("Float64")
480
+
481
+ result = getattr(df.groupby("name"), func)(numeric_only=numeric_only)
482
+
483
+ expected = DataFrame(values, index=Index(["A", "B"], name="name"))
484
+ if numeric_only:
485
+ expected = expected.drop(columns=["c_date"])
486
+ else:
487
+ expected["c_date_tz"] = expected["c_date"]
488
+ expected["c_timedelta"] = expected["c_date"]
489
+ expected["c_period"] = expected["c_date"]
490
+ expected["c_Integer"] = expected["c_int"]
491
+ expected["c_Floating"] = expected["c_float"]
492
+
493
+ tm.assert_frame_equal(result, expected)
494
+
495
+
496
+ def test_idxmin_idxmax_axis1():
497
+ df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
498
+ df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]
499
+
500
+ gb = df.groupby("A")
501
+
502
+ res = gb.idxmax(axis=1)
503
+
504
+ alt = df.iloc[:, 1:].idxmax(axis=1)
505
+ indexer = res.index.get_level_values(1)
506
+
507
+ tm.assert_series_equal(alt[indexer], res.droplevel("A"))
508
+
509
+ df["E"] = date_range("2016-01-01", periods=10)
510
+ gb2 = df.groupby("A")
511
+
512
+ msg = "reduction operation 'argmax' not allowed for this dtype"
513
+ with pytest.raises(TypeError, match=msg):
514
+ gb2.idxmax(axis=1)
515
+
516
+
517
+ @pytest.mark.parametrize("numeric_only", [True, False, None])
518
+ def test_axis1_numeric_only(request, groupby_func, numeric_only):
519
+ if groupby_func in ("idxmax", "idxmin"):
520
+ pytest.skip("idxmax and idx_min tested in test_idxmin_idxmax_axis1")
521
+ if groupby_func in ("corrwith", "skew"):
522
+ msg = "GH#47723 groupby.corrwith and skew do not correctly implement axis=1"
523
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
524
+
525
+ df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
526
+ df["E"] = "x"
527
+ groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]
528
+ gb = df.groupby(groups)
529
+ method = getattr(gb, groupby_func)
530
+ args = get_groupby_method_args(groupby_func, df)
531
+ kwargs = {"axis": 1}
532
+ if numeric_only is not None:
533
+ # when numeric_only is None we don't pass any argument
534
+ kwargs["numeric_only"] = numeric_only
535
+
536
+ # Functions without numeric_only and axis args
537
+ no_args = ("cumprod", "cumsum", "diff", "fillna", "pct_change", "rank", "shift")
538
+ # Functions with axis args
539
+ has_axis = (
540
+ "cumprod",
541
+ "cumsum",
542
+ "diff",
543
+ "pct_change",
544
+ "rank",
545
+ "shift",
546
+ "cummax",
547
+ "cummin",
548
+ "idxmin",
549
+ "idxmax",
550
+ "fillna",
551
+ )
552
+ if numeric_only is not None and groupby_func in no_args:
553
+ msg = "got an unexpected keyword argument 'numeric_only'"
554
+ with pytest.raises(TypeError, match=msg):
555
+ method(*args, **kwargs)
556
+ elif groupby_func not in has_axis:
557
+ msg = "got an unexpected keyword argument 'axis'"
558
+ with pytest.raises(TypeError, match=msg):
559
+ method(*args, **kwargs)
560
+ # fillna and shift are successful even on object dtypes
561
+ elif (numeric_only is None or not numeric_only) and groupby_func not in (
562
+ "fillna",
563
+ "shift",
564
+ ):
565
+ msgs = (
566
+ # cummax, cummin, rank
567
+ "not supported between instances of",
568
+ # cumprod
569
+ "can't multiply sequence by non-int of type 'float'",
570
+ # cumsum, diff, pct_change
571
+ "unsupported operand type",
572
+ )
573
+ with pytest.raises(TypeError, match=f"({'|'.join(msgs)})"):
574
+ method(*args, **kwargs)
575
+ else:
576
+ result = method(*args, **kwargs)
577
+
578
+ df_expected = df.drop(columns="E").T if numeric_only else df.T
579
+ expected = getattr(df_expected, groupby_func)(*args).T
580
+ if groupby_func == "shift" and not numeric_only:
581
+ # shift with axis=1 leaves the leftmost column as numeric
582
+ # but transposing for expected gives us object dtype
583
+ expected = expected.astype(float)
584
+
585
+ tm.assert_equal(result, expected)
586
+
587
+
588
+ def test_groupby_cumprod():
589
+ # GH 4095
590
+ df = DataFrame({"key": ["b"] * 10, "value": 2})
591
+
592
+ actual = df.groupby("key")["value"].cumprod()
593
+ expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
594
+ expected.name = "value"
595
+ tm.assert_series_equal(actual, expected)
596
+
597
+ df = DataFrame({"key": ["b"] * 100, "value": 2})
598
+ df["value"] = df["value"].astype(float)
599
+ actual = df.groupby("key")["value"].cumprod()
600
+ expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
601
+ expected.name = "value"
602
+ tm.assert_series_equal(actual, expected)
603
+
604
+
605
+ def test_groupby_cumprod_overflow():
606
+ # GH#37493 if we overflow we return garbage consistent with numpy
607
+ df = DataFrame({"key": ["b"] * 4, "value": 100_000})
608
+ actual = df.groupby("key")["value"].cumprod()
609
+ expected = Series(
610
+ [100_000, 10_000_000_000, 1_000_000_000_000_000, 7766279631452241920],
611
+ name="value",
612
+ )
613
+ tm.assert_series_equal(actual, expected)
614
+
615
+ numpy_result = df.groupby("key", group_keys=False)["value"].apply(
616
+ lambda x: x.cumprod()
617
+ )
618
+ numpy_result.name = "value"
619
+ tm.assert_series_equal(actual, numpy_result)
620
+
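+ # Worked check of the wrapped value above: 100_000**4 = 10**20, and
+ # 10**20 - 5 * 2**64 = 100000000000000000000 - 92233720368547758080
+ # = 7766279631452241920, i.e. the int64 wraparound numpy also produces.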
621
+
622
+ def test_groupby_cumprod_nan_influences_other_columns():
623
+ # GH#48064
624
+ df = DataFrame(
625
+ {
626
+ "a": 1,
627
+ "b": [1, np.nan, 2],
628
+ "c": [1, 2, 3.0],
629
+ }
630
+ )
631
+ result = df.groupby("a").cumprod(numeric_only=True, skipna=False)
632
+ expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]})
633
+ tm.assert_frame_equal(result, expected)
634
+
635
+
636
+ def scipy_sem(*args, **kwargs):
637
+ from scipy.stats import sem
638
+
639
+ return sem(*args, ddof=1, **kwargs)
640
+
641
+
642
+ @pytest.mark.parametrize(
643
+ "op,targop",
644
+ [
645
+ ("mean", np.mean),
646
+ ("median", np.median),
647
+ ("std", np.std),
648
+ ("var", np.var),
649
+ ("sum", np.sum),
650
+ ("prod", np.prod),
651
+ ("min", np.min),
652
+ ("max", np.max),
653
+ ("first", lambda x: x.iloc[0]),
654
+ ("last", lambda x: x.iloc[-1]),
655
+ ("count", np.size),
656
+ pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
657
+ ],
658
+ )
659
+ def test_ops_general(op, targop):
660
+ df = DataFrame(np.random.randn(1000))
661
+ labels = np.random.randint(0, 50, size=1000).astype(float)
662
+
663
+ result = getattr(df.groupby(labels), op)()
664
+ expected = df.groupby(labels).agg(targop)
665
+ tm.assert_frame_equal(result, expected)
666
+
667
+
668
+ def test_max_nan_bug():
669
+ raw = """,Date,app,File
670
+ -04-23,2013-04-23 00:00:00,,log080001.log
671
+ -05-06,2013-05-06 00:00:00,,log.log
672
+ -05-07,2013-05-07 00:00:00,OE,xlsx"""
673
+
674
+ with tm.assert_produces_warning(UserWarning, match="Could not infer format"):
675
+ df = pd.read_csv(StringIO(raw), parse_dates=[0])
676
+ gb = df.groupby("Date")
677
+ r = gb[["File"]].max()
678
+ e = gb["File"].max().to_frame()
679
+ tm.assert_frame_equal(r, e)
680
+ assert not r["File"].isna().any()
681
+
682
+
683
+ def test_nlargest():
684
+ a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
685
+ b = Series(list("a" * 5 + "b" * 5))
686
+ gb = a.groupby(b)
687
+ r = gb.nlargest(3)
688
+ e = Series(
689
+ [7, 5, 3, 10, 9, 6],
690
+ index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
691
+ )
692
+ tm.assert_series_equal(r, e)
693
+
694
+ a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
695
+ gb = a.groupby(b)
696
+ e = Series(
697
+ [3, 2, 1, 3, 3, 2],
698
+ index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
699
+ )
700
+ tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
701
+
702
+
703
+ def test_nlargest_mi_grouper():
704
+ # see gh-21411
705
+ npr = np.random.RandomState(123456789)
706
+
707
+ dts = date_range("20180101", periods=10)
708
+ iterables = [dts, ["one", "two"]]
709
+
710
+ idx = MultiIndex.from_product(iterables, names=["first", "second"])
711
+ s = Series(npr.randn(20), index=idx)
712
+
713
+ result = s.groupby("first").nlargest(1)
714
+
715
+ exp_idx = MultiIndex.from_tuples(
716
+ [
717
+ (dts[0], dts[0], "one"),
718
+ (dts[1], dts[1], "one"),
719
+ (dts[2], dts[2], "one"),
720
+ (dts[3], dts[3], "two"),
721
+ (dts[4], dts[4], "one"),
722
+ (dts[5], dts[5], "one"),
723
+ (dts[6], dts[6], "one"),
724
+ (dts[7], dts[7], "one"),
725
+ (dts[8], dts[8], "two"),
726
+ (dts[9], dts[9], "one"),
727
+ ],
728
+ names=["first", "first", "second"],
729
+ )
730
+
731
+ exp_values = [
732
+ 2.2129019979039612,
733
+ 1.8417114045748335,
734
+ 0.858963679564603,
735
+ 1.3759151378258088,
736
+ 0.9430284594687134,
737
+ 0.5296914208183142,
738
+ 0.8318045593815487,
739
+ -0.8476703342910327,
740
+ 0.3804446884133735,
741
+ -0.8028845810770998,
742
+ ]
743
+
744
+ expected = Series(exp_values, index=exp_idx)
745
+ tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
746
+
747
+
748
+ def test_nsmallest():
749
+ a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
750
+ b = Series(list("a" * 5 + "b" * 5))
751
+ gb = a.groupby(b)
752
+ r = gb.nsmallest(3)
753
+ e = Series(
754
+ [1, 2, 3, 0, 4, 6],
755
+ index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
756
+ )
757
+ tm.assert_series_equal(r, e)
758
+
759
+ a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
760
+ gb = a.groupby(b)
761
+ e = Series(
762
+ [0, 1, 1, 0, 1, 2],
763
+ index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
764
+ )
765
+ tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
766
+
767
+
768
+ @pytest.mark.parametrize(
769
+ "data, groups",
770
+ [([0, 1, 2, 3], [0, 0, 1, 1]), ([0], [0])],
771
+ )
772
+ @pytest.mark.parametrize("dtype", [None, *tm.ALL_INT_NUMPY_DTYPES])
773
+ @pytest.mark.parametrize("method", ["nlargest", "nsmallest"])
774
+ def test_nlargest_and_smallest_noop(data, groups, dtype, method):
775
+ # GH 15272, GH 16345, GH 29129
776
+ # Test nlargest/smallest when it results in a noop,
777
+ # i.e. input is sorted and group size <= n
778
+ if dtype is not None:
779
+ data = np.array(data, dtype=dtype)
780
+ if method == "nlargest":
781
+ data = list(reversed(data))
782
+ ser = Series(data, name="a")
783
+ result = getattr(ser.groupby(groups), method)(n=2)
784
+ expidx = np.array(groups, dtype=np.int_) if isinstance(groups, list) else groups
785
+ expected = Series(data, index=MultiIndex.from_arrays([expidx, ser.index]), name="a")
786
+ tm.assert_series_equal(result, expected)
787
+
788
+
789
+ @pytest.mark.parametrize("func", ["cumprod", "cumsum"])
790
+ def test_numpy_compat(func):
791
+ # see gh-12811
792
+ df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
793
+ g = df.groupby("A")
794
+
795
+ msg = "numpy operations are not valid with groupby"
796
+
797
+ with pytest.raises(UnsupportedFunctionCall, match=msg):
798
+ getattr(g, func)(1, 2, 3)
799
+ with pytest.raises(UnsupportedFunctionCall, match=msg):
800
+ getattr(g, func)(foo=1)
801
+
802
+
803
+ def test_cummin(dtypes_for_minmax):
804
+ dtype = dtypes_for_minmax[0]
805
+ min_val = dtypes_for_minmax[1]
806
+
807
+ # GH 15048
808
+ base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
809
+ expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
810
+
811
+ df = base_df.astype(dtype)
812
+
813
+ expected = DataFrame({"B": expected_mins}).astype(dtype)
814
+ result = df.groupby("A").cummin()
815
+ tm.assert_frame_equal(result, expected)
816
+ result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
817
+ tm.assert_frame_equal(result, expected)
818
+
819
+ # Test w/ min value for dtype
820
+ df.loc[[2, 6], "B"] = min_val
821
+ df.loc[[1, 5], "B"] = min_val + 1
822
+ expected.loc[[2, 3, 6, 7], "B"] = min_val
823
+ expected.loc[[1, 5], "B"] = min_val + 1 # should not be rounded to min_val
824
+ result = df.groupby("A").cummin()
825
+ tm.assert_frame_equal(result, expected, check_exact=True)
826
+ expected = (
827
+ df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
828
+ )
829
+ tm.assert_frame_equal(result, expected, check_exact=True)
830
+
831
+ # Test nan in some values
832
+ # Explicit cast to float to avoid implicit cast when setting nan
833
+ base_df = base_df.astype({"B": "float"})
834
+ base_df.loc[[0, 2, 4, 6], "B"] = np.nan
835
+ expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
836
+ result = base_df.groupby("A").cummin()
837
+ tm.assert_frame_equal(result, expected)
838
+ expected = (
839
+ base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
840
+ )
841
+ tm.assert_frame_equal(result, expected)
842
+
843
+ # GH 15561
844
+ df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
845
+ expected = Series(pd.to_datetime("2001"), index=[0], name="b")
846
+
847
+ result = df.groupby("a")["b"].cummin()
848
+ tm.assert_series_equal(expected, result)
849
+
850
+ # GH 15635
851
+ df = DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]})
852
+ result = df.groupby("a").b.cummin()
853
+ expected = Series([1, 2, 1], name="b")
854
+ tm.assert_series_equal(result, expected)
855
+
856
+
857
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
858
+ @pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"])
859
+ def test_cummin_max_all_nan_column(method, dtype):
860
+ base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
861
+ base_df["B"] = base_df["B"].astype(dtype)
862
+ grouped = base_df.groupby("A")
863
+
864
+ expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype)
865
+ result = getattr(grouped, method)()
866
+ tm.assert_frame_equal(expected, result)
867
+
868
+ result = getattr(grouped["B"], method)().to_frame()
869
+ tm.assert_frame_equal(expected, result)
870
+
871
+
872
+ def test_cummax(dtypes_for_minmax):
873
+ dtype = dtypes_for_minmax[0]
874
+ max_val = dtypes_for_minmax[2]
875
+
876
+ # GH 15048
877
+ base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
878
+ expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
879
+
880
+ df = base_df.astype(dtype)
881
+
882
+ expected = DataFrame({"B": expected_maxs}).astype(dtype)
883
+ result = df.groupby("A").cummax()
884
+ tm.assert_frame_equal(result, expected)
885
+ result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
886
+ tm.assert_frame_equal(result, expected)
887
+
888
+ # Test w/ max value for dtype
889
+ df.loc[[2, 6], "B"] = max_val
890
+ expected.loc[[2, 3, 6, 7], "B"] = max_val
891
+ result = df.groupby("A").cummax()
892
+ tm.assert_frame_equal(result, expected)
893
+ expected = (
894
+ df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
895
+ )
896
+ tm.assert_frame_equal(result, expected)
897
+
898
+ # Test nan in some values
899
+ # Explicit cast to float to avoid implicit cast when setting nan
900
+ base_df = base_df.astype({"B": "float"})
901
+ base_df.loc[[0, 2, 4, 6], "B"] = np.nan
902
+ expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
903
+ result = base_df.groupby("A").cummax()
904
+ tm.assert_frame_equal(result, expected)
905
+ expected = (
906
+ base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
907
+ )
908
+ tm.assert_frame_equal(result, expected)
909
+
910
+ # GH 15561
911
+ df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
912
+ expected = Series(pd.to_datetime("2001"), index=[0], name="b")
913
+
914
+ result = df.groupby("a")["b"].cummax()
915
+ tm.assert_series_equal(expected, result)
916
+
917
+ # GH 15635
918
+ df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]})
919
+ result = df.groupby("a").b.cummax()
920
+ expected = Series([2, 1, 2], name="b")
921
+ tm.assert_series_equal(result, expected)
922
+
923
+
924
+ def test_cummax_i8_at_implementation_bound():
925
+ # the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT
926
+ # for int64 dtype GH#46382
927
+ ser = Series([pd.NaT._value + n for n in range(5)])
928
+ df = DataFrame({"A": 1, "B": ser, "C": ser.view("M8[ns]")})
929
+ gb = df.groupby("A")
930
+
931
+ res = gb.cummax()
932
+ exp = df[["B", "C"]]
933
+ tm.assert_frame_equal(res, exp)
934
+
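+ # Note: pd.NaT._value is np.iinfo("int64").min (NPY_NAT); per GH#46382
+ # the cummax accumulator used to start at NPY_NAT + 1, mishandling values
+ # at the very bottom of the int64 range.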
935
+
936
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
937
+ @pytest.mark.parametrize("dtype", ["float", "Int64", "Float64"])
938
+ @pytest.mark.parametrize(
939
+ "groups,expected_data",
940
+ [
941
+ ([1, 1, 1], [1, None, None]),
942
+ ([1, 2, 3], [1, None, 2]),
943
+ ([1, 3, 3], [1, None, None]),
944
+ ],
945
+ )
946
+ def test_cummin_max_skipna(method, dtype, groups, expected_data):
947
+ # GH-34047
948
+ df = DataFrame({"a": Series([1, None, 2], dtype=dtype)})
949
+ orig = df.copy()
950
+ gb = df.groupby(groups)["a"]
951
+
952
+ result = getattr(gb, method)(skipna=False)
953
+ expected = Series(expected_data, dtype=dtype, name="a")
954
+
955
+ # check we didn't accidentally alter df
956
+ tm.assert_frame_equal(df, orig)
957
+
958
+ tm.assert_series_equal(result, expected)
959
+
960
+
961
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
962
+ def test_cummin_max_skipna_multiple_cols(method):
963
+ # Ensure missing value in "a" doesn't cause "b" to be nan-filled
964
+ df = DataFrame({"a": [np.nan, 2.0, 2.0], "b": [2.0, 2.0, 2.0]})
965
+ gb = df.groupby([1, 1, 1])[["a", "b"]]
966
+
967
+ result = getattr(gb, method)(skipna=False)
968
+ expected = DataFrame({"a": [np.nan, np.nan, np.nan], "b": [2.0, 2.0, 2.0]})
969
+
970
+ tm.assert_frame_equal(result, expected)
971
+
972
+
973
+ @td.skip_if_32bit
974
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
975
+ @pytest.mark.parametrize(
976
+ "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)]
977
+ )
978
+ def test_nullable_int_not_cast_as_float(method, dtype, val):
979
+ data = [val, pd.NA]
980
+ df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype)
981
+ grouped = df.groupby("grp")
982
+
983
+ result = grouped.transform(method)
984
+ expected = DataFrame({"b": data}, dtype=dtype)
985
+
986
+ tm.assert_frame_equal(result, expected)
987
+
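+ # Note: 2**53 + 1 is the smallest positive integer float64 cannot
+ # represent exactly, so preserving the nullable dtype (rather than
+ # casting through float) is what keeps `val` exact here.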
988
+
989
+ @pytest.mark.parametrize(
990
+ "in_vals, out_vals",
991
+ [
992
+ # Basics: strictly increasing (T), strictly decreasing (F),
993
+ # abs val increasing (F), non-strictly increasing (T)
994
+ ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
995
+ # Test with inf vals
996
+ (
997
+ [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
998
+ [True, False, True, False],
999
+ ),
1000
+ # Test with nan vals; should always be False
1001
+ (
1002
+ [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
1003
+ [False, False, False, False],
1004
+ ),
1005
+ ],
1006
+ )
1007
+ def test_is_monotonic_increasing(in_vals, out_vals):
1008
+ # GH 17015
1009
+ source_dict = {
1010
+ "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
1011
+ "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
1012
+ "C": in_vals,
1013
+ }
1014
+ df = DataFrame(source_dict)
1015
+ result = df.groupby("B").C.is_monotonic_increasing
1016
+ index = Index(list("abcd"), name="B")
1017
+ expected = Series(index=index, data=out_vals, name="C")
1018
+ tm.assert_series_equal(result, expected)
1019
+
1020
+ # Also check result equal to manually taking x.is_monotonic_increasing.
1021
+ expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
1022
+ tm.assert_series_equal(result, expected)
1023
+
1024
+
1025
+ @pytest.mark.parametrize(
1026
+ "in_vals, out_vals",
1027
+ [
1028
+ # Basics: strictly decreasing (T), strictly increasing (F),
1029
+ # abs val decreasing (F), non-strictly increasing (T)
1030
+ ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
1031
+ # Test with inf vals
1032
+ (
1033
+ [np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
1034
+ [True, True, False, True],
1035
+ ),
1036
+ # Test with nan vals; should always be False
1037
+ (
1038
+ [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
1039
+ [False, False, False, False],
1040
+ ),
1041
+ ],
1042
+ )
1043
+ def test_is_monotonic_decreasing(in_vals, out_vals):
1044
+ # GH 17015
1045
+ source_dict = {
1046
+ "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
1047
+ "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
1048
+ "C": in_vals,
1049
+ }
1050
+
1051
+ df = DataFrame(source_dict)
1052
+ result = df.groupby("B").C.is_monotonic_decreasing
1053
+ index = Index(list("abcd"), name="B")
1054
+ expected = Series(index=index, data=out_vals, name="C")
1055
+ tm.assert_series_equal(result, expected)
1056
+
1057
+
1058
+ # describe
1059
+ # --------------------------------
1060
+
1061
+
1062
+ def test_apply_describe_bug(mframe):
1063
+ grouped = mframe.groupby(level="first")
1064
+ grouped.describe() # it works!
1065
+
1066
+
1067
+ def test_series_describe_multikey():
1068
+ ts = tm.makeTimeSeries()
1069
+ grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
1070
+ result = grouped.describe()
1071
+ tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False)
1072
+ tm.assert_series_equal(result["std"], grouped.std(), check_names=False)
1073
+ tm.assert_series_equal(result["min"], grouped.min(), check_names=False)
1074
+
1075
+
1076
+ def test_series_describe_single():
1077
+ ts = tm.makeTimeSeries()
1078
+ grouped = ts.groupby(lambda x: x.month)
1079
+ result = grouped.apply(lambda x: x.describe())
1080
+ expected = grouped.describe().stack()
1081
+ tm.assert_series_equal(result, expected)
1082
+
1083
+
1084
+ @pytest.mark.parametrize("keys", ["key1", ["key1", "key2"]])
1085
+ def test_series_describe_as_index(as_index, keys):
1086
+ # GH#49256
1087
+ df = DataFrame(
1088
+ {
1089
+ "key1": ["one", "two", "two", "three", "two"],
1090
+ "key2": ["one", "two", "two", "three", "two"],
1091
+ "foo2": [1, 2, 4, 4, 6],
1092
+ }
1093
+ )
1094
+ gb = df.groupby(keys, as_index=as_index)["foo2"]
1095
+ result = gb.describe()
1096
+ expected = DataFrame(
1097
+ {
1098
+ "key1": ["one", "three", "two"],
1099
+ "count": [1.0, 1.0, 3.0],
1100
+ "mean": [1.0, 4.0, 4.0],
1101
+ "std": [np.nan, np.nan, 2.0],
1102
+ "min": [1.0, 4.0, 2.0],
1103
+ "25%": [1.0, 4.0, 3.0],
1104
+ "50%": [1.0, 4.0, 4.0],
1105
+ "75%": [1.0, 4.0, 5.0],
1106
+ "max": [1.0, 4.0, 6.0],
1107
+ }
1108
+ )
1109
+ if len(keys) == 2:
1110
+ expected.insert(1, "key2", expected["key1"])
1111
+ if as_index:
1112
+ expected = expected.set_index(keys)
1113
+ tm.assert_frame_equal(result, expected)
1114
+
1115
+
+ def test_series_index_name(df):
+     grouped = df.loc[:, ["C"]].groupby(df["A"])
+     result = grouped.agg(lambda x: x.mean())
+     assert result.index.name == "A"
+
+
+ def test_frame_describe_multikey(tsframe):
+     grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
+     result = grouped.describe()
+     desc_groups = []
+     for col in tsframe:
+         group = grouped[col].describe()
+         # GH 17464 - Remove duplicate MultiIndex levels
+         group_col = MultiIndex(
+             levels=[[col], group.columns],
+             codes=[[0] * len(group.columns), range(len(group.columns))],
+         )
+         group = DataFrame(group.values, columns=group_col, index=group.index)
+         desc_groups.append(group)
+     expected = pd.concat(desc_groups, axis=1)
+     tm.assert_frame_equal(result, expected)
+
+     groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
+     result = groupedT.describe()
+     expected = tsframe.describe().T
+     # reverting the change from https://github.com/pandas-dev/pandas/pull/35441/
+     expected.index = MultiIndex(
+         levels=[[0, 1], expected.index],
+         codes=[[0, 0, 1, 1], range(len(expected.index))],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_frame_describe_tupleindex():
+     # GH 14848 - regression from 0.19.0 to 0.19.1
+     df1 = DataFrame(
+         {
+             "x": [1, 2, 3, 4, 5] * 3,
+             "y": [10, 20, 30, 40, 50] * 3,
+             "z": [100, 200, 300, 400, 500] * 3,
+         }
+     )
+     df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
+     df2 = df1.rename(columns={"k": "key"})
+     msg = "Names should be list-like for a MultiIndex"
+     with pytest.raises(ValueError, match=msg):
+         df1.groupby("k").describe()
+     with pytest.raises(ValueError, match=msg):
+         df2.groupby("key").describe()
+
+
+ def test_frame_describe_unstacked_format():
+     # GH 4792
+     prices = {
+         Timestamp("2011-01-06 10:59:05", tz=None): 24990,
+         Timestamp("2011-01-06 12:43:33", tz=None): 25499,
+         Timestamp("2011-01-06 12:54:09", tz=None): 25499,
+     }
+     volumes = {
+         Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
+         Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
+         Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
+     }
+     df = DataFrame({"PRICE": prices, "VOLUME": volumes})
+     result = df.groupby("PRICE").VOLUME.describe()
+     data = [
+         df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
+         df[df.PRICE == 25499].VOLUME.describe().values.tolist(),
+     ]
+     expected = DataFrame(
+         data,
+         index=Index([24990, 25499], name="PRICE"),
+         columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.filterwarnings(
+     "ignore:"
+     "indexing past lexsort depth may impact performance:"
+     "pandas.errors.PerformanceWarning"
+ )
+ @pytest.mark.parametrize("as_index", [True, False])
+ @pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
+ def test_describe_with_duplicate_output_column_names(as_index, keys):
+     # GH 35314
+     df = DataFrame(
+         {
+             "a1": [99, 99, 99, 88, 88, 88],
+             "a2": [99, 99, 99, 88, 88, 88],
+             "b": [1, 2, 3, 4, 5, 6],
+             "c": [10, 20, 30, 40, 50, 60],
+         },
+         columns=["a1", "a2", "b", "b"],
+         copy=False,
+     )
+     if keys == ["a1"]:
+         df = df.drop(columns="a2")
+
+     expected = (
+         DataFrame.from_records(
+             [
+                 ("b", "count", 3.0, 3.0),
+                 ("b", "mean", 5.0, 2.0),
+                 ("b", "std", 1.0, 1.0),
+                 ("b", "min", 4.0, 1.0),
+                 ("b", "25%", 4.5, 1.5),
+                 ("b", "50%", 5.0, 2.0),
+                 ("b", "75%", 5.5, 2.5),
+                 ("b", "max", 6.0, 3.0),
+                 ("b", "count", 3.0, 3.0),
+                 ("b", "mean", 5.0, 2.0),
+                 ("b", "std", 1.0, 1.0),
+                 ("b", "min", 4.0, 1.0),
+                 ("b", "25%", 4.5, 1.5),
+                 ("b", "50%", 5.0, 2.0),
+                 ("b", "75%", 5.5, 2.5),
+                 ("b", "max", 6.0, 3.0),
+             ],
+         )
+         .set_index([0, 1])
+         .T
+     )
+     expected.columns.names = [None, None]
+     if len(keys) == 2:
+         expected.index = MultiIndex(
+             levels=[[88, 99], [88, 99]], codes=[[0, 1], [0, 1]], names=["a1", "a2"]
+         )
+     else:
+         expected.index = Index([88, 99], name="a1")
+
+     if not as_index:
+         expected = expected.reset_index()
+
+     result = df.groupby(keys, as_index=as_index).describe()
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_describe_duplicate_columns():
+     # GH#50806
+     df = DataFrame([[0, 1, 2, 3]])
+     df.columns = [0, 1, 2, 0]
+     gb = df.groupby(df[1])
+     result = gb.describe(percentiles=[])
+
+     columns = ["count", "mean", "std", "min", "50%", "max"]
+     frames = [
+         DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns)
+         for val in (0.0, 2.0, 3.0)
+     ]
+     expected = pd.concat(frames, axis=1)
+     expected.columns = MultiIndex(
+         levels=[[0, 2], columns],
+         codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))],
+     )
+     expected.index.names = [1]
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_mean_no_overflow():
+     # Regression test for (#22487)
+     df = DataFrame(
+         {
+             "user": ["A", "A", "A", "A", "A"],
+             "connections": [4970, 4749, 4719, 4704, 18446744073699999744],
+         }
+     )
+     assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840
+
+
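# Why the regression above matters (illustrative note, not from the file): the
# last value is bigger than the largest int64, so a naive int64 accumulator
# would overflow; the asserted mean is the exact average rounded to the
# nearest representable float64.
import numpy as np

big = 18446744073699999744
assert big > np.iinfo(np.int64).max  # int64 max is 9223372036854775807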
+ @pytest.mark.parametrize(
+     "values",
+     [
+         {
+             "a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
+             "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
+         },
+         {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
+     ],
+ )
+ @pytest.mark.parametrize("function", ["mean", "median", "var"])
+ def test_apply_to_nullable_integer_returns_float(values, function):
+     # https://github.com/pandas-dev/pandas/issues/32219
+     output = 0.5 if function == "var" else 1.5
+     arr = np.array([output] * 3, dtype=float)
+     idx = Index([1, 2, 3], name="a", dtype="Int64")
+     expected = DataFrame({"b": arr}, index=idx).astype("Float64")
+
+     groups = DataFrame(values, dtype="Int64").groupby("a")
+
+     result = getattr(groups, function)()
+     tm.assert_frame_equal(result, expected)
+
+     result = groups.agg(function)
+     tm.assert_frame_equal(result, expected)
+
+     result = groups.agg([function])
+     expected.columns = MultiIndex.from_tuples([("b", function)])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_sum_below_mincount_nullable_integer():
+     # https://github.com/pandas-dev/pandas/issues/32861
+     df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")
+     grouped = df.groupby("a")
+     idx = Index([0, 1, 2], name="a", dtype="Int64")
+
+     result = grouped["b"].sum(min_count=2)
+     expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
+     tm.assert_series_equal(result, expected)
+
+     result = grouped.sum(min_count=2)
+     expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx)
+     tm.assert_frame_equal(result, expected)
+
+
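# Sketch of the min_count contract exercised above (illustrative; toy data):
# a grouped sum reports NA for any group with fewer than min_count
# non-missing values, rather than silently returning 0.
import pandas as pd

s = pd.Series([1, 2, 3], dtype="Int64")
print(s.groupby([0, 0, 1]).sum(min_count=2))
# 0       3     <- two values in the group, sum kept
# 1    <NA>     <- one value only, below min_count
# dtype: Int64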
+ def test_mean_on_timedelta():
+     # GH 17382
+     df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5})
+     result = df.groupby("cat")["time"].mean()
+     expected = Series(
+         pd.to_timedelta([4, 5]), name="time", index=Index(["A", "B"], name="cat")
+     )
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_sum_timedelta_with_nat():
+     # GH#42659
+     df = DataFrame(
+         {
+             "a": [1, 1, 2, 2],
+             "b": [pd.Timedelta("1d"), pd.Timedelta("2d"), pd.Timedelta("3d"), pd.NaT],
+         }
+     )
+     td3 = pd.Timedelta(days=3)
+
+     gb = df.groupby("a")
+
+     res = gb.sum()
+     expected = DataFrame({"b": [td3, td3]}, index=Index([1, 2], name="a"))
+     tm.assert_frame_equal(res, expected)
+
+     res = gb["b"].sum()
+     tm.assert_series_equal(res, expected["b"])
+
+     res = gb["b"].sum(min_count=2)
+     expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index)
+     tm.assert_series_equal(res, expected)
+
+
+ @pytest.mark.parametrize(
+     "kernel, has_arg",
+     [
+         ("all", False),
+         ("any", False),
+         ("bfill", False),
+         ("corr", True),
+         ("corrwith", True),
+         ("cov", True),
+         ("cummax", True),
+         ("cummin", True),
+         ("cumprod", True),
+         ("cumsum", True),
+         ("diff", False),
+         ("ffill", False),
+         ("fillna", False),
+         ("first", True),
+         ("idxmax", True),
+         ("idxmin", True),
+         ("last", True),
+         ("max", True),
+         ("mean", True),
+         ("median", True),
+         ("min", True),
+         ("nth", False),
+         ("nunique", False),
+         ("pct_change", False),
+         ("prod", True),
+         ("quantile", True),
+         ("sem", True),
+         ("skew", True),
+         ("std", True),
+         ("sum", True),
+         ("var", True),
+     ],
+ )
+ @pytest.mark.parametrize("numeric_only", [True, False, lib.no_default])
+ @pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
+ def test_numeric_only(kernel, has_arg, numeric_only, keys):
+     # GH#46072
+     # drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False
+     # has_arg: Whether the op has a numeric_only arg
+     df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]})
+
+     args = get_groupby_method_args(kernel, df)
+     kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only}
+
+     gb = df.groupby(keys)
+     method = getattr(gb, kernel)
+     if has_arg and numeric_only is True:
+         # Cases where b does not appear in the result
+         result = method(*args, **kwargs)
+         assert "b" not in result.columns
+     elif (
+         # kernels that work on any dtype and have numeric_only arg
+         kernel in ("first", "last")
+         or (
+             # kernels that work on any dtype and don't have numeric_only arg
+             kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique")
+             and numeric_only is lib.no_default
+         )
+     ):
+         result = method(*args, **kwargs)
+         assert "b" in result.columns
+     elif has_arg or kernel in ("idxmax", "idxmin"):
+         assert numeric_only is not True
+         # kernels that are successful on any dtype were above; this will fail
+
+         # object dtypes for transformations are not implemented in Cython and
+         # have no Python fallback
+         exception = NotImplementedError if kernel.startswith("cum") else TypeError
+
+         msg = "|".join(
+             [
+                 "not allowed for this dtype",
+                 "must be a string or a number",
+                 "cannot be performed against 'object' dtypes",
+                 "must be a string or a real number",
+                 "unsupported operand type",
+                 "not supported between instances of",
+                 "function is not implemented for this dtype",
+             ]
+         )
+         with pytest.raises(exception, match=msg):
+             method(*args, **kwargs)
+     elif not has_arg and numeric_only is not lib.no_default:
+         with pytest.raises(
+             TypeError, match="got an unexpected keyword argument 'numeric_only'"
+         ):
+             method(*args, **kwargs)
+     else:
+         assert kernel in ("diff", "pct_change")
+         assert numeric_only is lib.no_default
+         # Doesn't have numeric_only argument and fails on nuisance columns
+         with pytest.raises(TypeError, match=r"unsupported operand type"):
+             method(*args, **kwargs)
+
+
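# Sketch of the numeric_only switch this test sweeps over (illustrative; toy
# data; assumes pandas versions where numeric_only is an explicit keyword):
import pandas as pd

toy = pd.DataFrame({"a1": [1, 1], "x": [1.5, 2.5], "b": ["u", "v"]})
res = toy.groupby("a1").mean(numeric_only=True)
assert "b" not in res.columns  # the object column is dropped up front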
+ @pytest.mark.parametrize("dtype", [bool, int, float, object])
+ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
+     # GH#46560
+     if groupby_func == "corrwith":
+         msg = "corrwith is not implemented on SeriesGroupBy"
+         request.node.add_marker(pytest.mark.xfail(reason=msg))
+
+     grouper = [0, 0, 1]
+
+     ser = Series([1, 0, 0], dtype=dtype)
+     gb = ser.groupby(grouper)
+     method = getattr(gb, groupby_func)
+
+     expected_ser = Series([1, 0, 0])
+     expected_gb = expected_ser.groupby(grouper)
+     expected_method = getattr(expected_gb, groupby_func)
+
+     args = get_groupby_method_args(groupby_func, ser)
+
+     fails_on_numeric_object = (
+         "corr",
+         "cov",
+         "cummax",
+         "cummin",
+         "cumprod",
+         "cumsum",
+         "idxmax",
+         "idxmin",
+         "quantile",
+     )
+     # ops that give an object result on object input
+     obj_result = (
+         "first",
+         "last",
+         "nth",
+         "bfill",
+         "ffill",
+         "shift",
+         "sum",
+         "diff",
+         "pct_change",
+         "var",
+         "mean",
+         "median",
+         "min",
+         "max",
+         "prod",
+     )
+
+     # Test default behavior; kernels that fail may be enabled in the future but kernels
+     # that succeed should not be allowed to fail (without deprecation, at least)
+     if groupby_func in fails_on_numeric_object and dtype is object:
+         if groupby_func in ("idxmax", "idxmin"):
+             msg = "not allowed for this dtype"
+         elif groupby_func == "quantile":
+             msg = "cannot be performed against 'object' dtypes"
+         else:
+             msg = "is not supported for object dtype"
+         with pytest.raises(TypeError, match=msg):
+             method(*args)
+     elif dtype is object:
+         result = method(*args)
+         expected = expected_method(*args)
+         if groupby_func in obj_result:
+             expected = expected.astype(object)
+         tm.assert_series_equal(result, expected)
+
+     has_numeric_only = (
+         "first",
+         "last",
+         "max",
+         "mean",
+         "median",
+         "min",
+         "prod",
+         "quantile",
+         "sem",
+         "skew",
+         "std",
+         "sum",
+         "var",
+         "cummax",
+         "cummin",
+         "cumprod",
+         "cumsum",
+     )
+     if groupby_func not in has_numeric_only:
+         msg = "got an unexpected keyword argument 'numeric_only'"
+         with pytest.raises(TypeError, match=msg):
+             method(*args, numeric_only=True)
+     elif dtype is object:
+         msg = "|".join(
+             [
+                 "SeriesGroupBy.sem called with numeric_only=True and dtype object",
+                 "Series.skew does not allow numeric_only=True with non-numeric",
+                 "cum(sum|prod|min|max) is not supported for object dtype",
+                 r"Cannot use numeric_only=True with SeriesGroupBy\..* and non-numeric",
+             ]
+         )
+         with pytest.raises(TypeError, match=msg):
+             method(*args, numeric_only=True)
+     else:
+         result = method(*args, numeric_only=True)
+         expected = method(*args, numeric_only=False)
+         tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize("dtype", [int, float, object])
+ @pytest.mark.parametrize(
+     "kwargs",
+     [
+         {"percentiles": [0.10, 0.20, 0.30], "include": "all", "exclude": None},
+         {"percentiles": [0.10, 0.20, 0.30], "include": None, "exclude": ["int"]},
+         {"percentiles": [0.10, 0.20, 0.30], "include": ["int"], "exclude": None},
+     ],
+ )
+ def test_groupby_empty_dataset(dtype, kwargs):
+     # GH#41575
+     df = DataFrame([[1, 2, 3]], columns=["A", "B", "C"], dtype=dtype)
+     df["B"] = df["B"].astype(int)
+     df["C"] = df["C"].astype(float)
+
+     result = df.iloc[:0].groupby("A").describe(**kwargs)
+     expected = df.groupby("A").describe(**kwargs).reset_index(drop=True).iloc[:0]
+     tm.assert_frame_equal(result, expected)
+
+     result = df.iloc[:0].groupby("A").B.describe(**kwargs)
+     expected = df.groupby("A").B.describe(**kwargs).reset_index(drop=True).iloc[:0]
+     expected.index = Index([])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_corrwith_with_1_axis():
+     # GH 47723
+     df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]})
+     result = df.groupby("a").corrwith(df, axis=1)
+     index = Index(
+         data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)],
+         name=("a", None),
+     )
+     expected = Series([np.nan] * 6, index=index)
+     tm.assert_series_equal(result, expected)
+
+
+ def test_multiindex_group_all_columns_when_empty(groupby_func):
+     # GH 32464
+     df = DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"])
+     gb = df.groupby(["a", "b", "c"], group_keys=False)
+     method = getattr(gb, groupby_func)
+     args = get_groupby_method_args(groupby_func, df)
+
+     result = method(*args).index
+     expected = df.index
+     tm.assert_index_equal(result, expected)
+
+
+ def test_duplicate_columns(request, groupby_func, as_index):
+     # GH#50806
+     if groupby_func == "corrwith":
+         msg = "GH#50845 - corrwith fails when there are duplicate columns"
+         request.node.add_marker(pytest.mark.xfail(reason=msg))
+     df = DataFrame([[1, 3, 6], [1, 4, 7], [2, 5, 8]], columns=list("abb"))
+     args = get_groupby_method_args(groupby_func, df)
+     gb = df.groupby("a", as_index=as_index)
+     result = getattr(gb, groupby_func)(*args)
+
+     expected_df = df.set_axis(["a", "b", "c"], axis=1)
+     expected_args = get_groupby_method_args(groupby_func, expected_df)
+     expected_gb = expected_df.groupby("a", as_index=as_index)
+     expected = getattr(expected_gb, groupby_func)(*expected_args)
+     if groupby_func not in ("size", "ngroup", "cumcount"):
+         expected = expected.rename(columns={"c": "b"})
+     tm.assert_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby.py ADDED
@@ -0,0 +1,2837 @@
+ from datetime import datetime
+ from decimal import Decimal
+
+ import numpy as np
+ import pytest
+
+ from pandas.compat import IS64
+ from pandas.errors import (
+     PerformanceWarning,
+     SpecificationError,
+ )
+
+ import pandas as pd
+ from pandas import (
+     Categorical,
+     DataFrame,
+     Grouper,
+     Index,
+     MultiIndex,
+     RangeIndex,
+     Series,
+     Timedelta,
+     Timestamp,
+     date_range,
+     to_datetime,
+ )
+ import pandas._testing as tm
+ from pandas.core.arrays import BooleanArray
+ import pandas.core.common as com
+ from pandas.tests.groupby import get_groupby_method_args
+
+
+ def test_repr():
+     # GH18203
+     result = repr(Grouper(key="A", level="B"))
+     expected = "Grouper(key='A', level='B', axis=0, sort=False, dropna=True)"
+     assert result == expected
+
+
+ def test_groupby_std_datetimelike():
+     # GH#48481
+     tdi = pd.timedelta_range("1 Day", periods=10000)
+     ser = Series(tdi)
+     ser[::5] *= 2  # get different std for different groups
+
+     df = ser.to_frame("A")
+
+     df["B"] = ser + Timestamp(0)
+     df["C"] = ser + Timestamp(0, tz="UTC")
+     df.iloc[-1] = pd.NaT  # last group includes NaTs
+
+     gb = df.groupby(list(range(5)) * 2000)
+
+     result = gb.std()
+
+     # Note: this does not _exactly_ match what we would get if we did
+     # [gb.get_group(i).std() for i in gb.groups]
+     # but it _does_ match the floating point error we get doing the
+     # same operation on int64 data xref GH#51332
+     td1 = Timedelta("2887 days 11:21:02.326710176")
+     td4 = Timedelta("2886 days 00:42:34.664668096")
+     exp_ser = Series([td1 * 2, td1, td1, td1, td4], index=np.arange(5))
+     expected = DataFrame({"A": exp_ser, "B": exp_ser, "C": exp_ser})
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
+ def test_basic(dtype):
+     data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
+
+     index = np.arange(9)
+     np.random.shuffle(index)
+     data = data.reindex(index)
+
+     grouped = data.groupby(lambda x: x // 3, group_keys=False)
+
+     for k, v in grouped:
+         assert len(v) == 3
+
+     agged = grouped.aggregate(np.mean)
+     assert agged[1] == 1
+
+     tm.assert_series_equal(agged, grouped.agg(np.mean))  # shorthand
+     tm.assert_series_equal(agged, grouped.mean())
+     tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
+
+     expected = grouped.apply(lambda x: x * x.sum())
+     transformed = grouped.transform(lambda x: x * x.sum())
+     assert transformed[7] == 12
+     tm.assert_series_equal(transformed, expected)
+
+     value_grouped = data.groupby(data)
+     tm.assert_series_equal(
+         value_grouped.aggregate(np.mean), agged, check_index_type=False
+     )
+
+     # complex agg
+     agged = grouped.aggregate([np.mean, np.std])
+
+     msg = r"nested renamer is not supported"
+     with pytest.raises(SpecificationError, match=msg):
+         grouped.aggregate({"one": np.mean, "two": np.std})
+
+     group_constants = {0: 10, 1: 20, 2: 30}
+     agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
+     assert agged[1] == 21
+
+     # corner cases
+     msg = "Must produce aggregated value"
+     # exception raised is type Exception
+     with pytest.raises(Exception, match=msg):
+         grouped.aggregate(lambda x: x * 2)
+
+
+ def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
+     key = mframe.index.codes[0]
+     grouped = mframe.groupby(key)
+     result = grouped.sum()
+
+     expected = mframe.groupby(key.astype("O")).sum()
+     assert result.index.dtype == np.int8
+     assert expected.index.dtype == np.int64
+     tm.assert_frame_equal(result, expected, check_index_type=False)
+
+     # GH 3911, mixed frame non-conversion
+     df = df_mixed_floats.copy()
+     df["value"] = range(len(df))
+
+     def max_value(group):
+         return group.loc[group["value"].idxmax()]
+
+     applied = df.groupby("A").apply(max_value)
+     result = applied.dtypes
+     expected = df.dtypes
+     tm.assert_series_equal(result, expected)
+
+
+ def test_inconsistent_return_type():
+     # GH5592
+     # inconsistent return type
+     df = DataFrame(
+         {
+             "A": ["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
+             "B": Series(np.arange(7), dtype="int64"),
+             "C": date_range("20130101", periods=7),
+         }
+     )
+
+     def f_0(grp):
+         return grp.iloc[0]
+
+     expected = df.groupby("A").first()[["B"]]
+     result = df.groupby("A").apply(f_0)[["B"]]
+     tm.assert_frame_equal(result, expected)
+
+     def f_1(grp):
+         if grp.name == "Tiger":
+             return None
+         return grp.iloc[0]
+
+     result = df.groupby("A").apply(f_1)[["B"]]
+     e = expected.copy()
+     e.loc["Tiger"] = np.nan
+     tm.assert_frame_equal(result, e)
+
+     def f_2(grp):
+         if grp.name == "Pony":
+             return None
+         return grp.iloc[0]
+
+     result = df.groupby("A").apply(f_2)[["B"]]
+     e = expected.copy()
+     e.loc["Pony"] = np.nan
+     tm.assert_frame_equal(result, e)
+
+     # 5592 revisited, with datetimes
+     def f_3(grp):
+         if grp.name == "Pony":
+             return None
+         return grp.iloc[0]
+
+     result = df.groupby("A").apply(f_3)[["C"]]
+     e = df.groupby("A").first()[["C"]]
+     e.loc["Pony"] = pd.NaT
+     tm.assert_frame_equal(result, e)
+
+     # scalar outputs
+     def f_4(grp):
+         if grp.name == "Pony":
+             return None
+         return grp.iloc[0].loc["C"]
+
+     result = df.groupby("A").apply(f_4)
+     e = df.groupby("A").first()["C"].copy()
+     e.loc["Pony"] = np.nan
+     e.name = None
+     tm.assert_series_equal(result, e)
+
+
+ def test_pass_args_kwargs(ts, tsframe):
+     def f(x, q=None, axis=0):
+         return np.percentile(x, q, axis=axis)
+
+     g = lambda x: np.percentile(x, 80, axis=0)
+
+     # Series
+     ts_grouped = ts.groupby(lambda x: x.month)
+     agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
+     apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
+     trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
+
+     agg_expected = ts_grouped.quantile(0.8)
+     trans_expected = ts_grouped.transform(g)
+
+     tm.assert_series_equal(apply_result, agg_expected)
+     tm.assert_series_equal(agg_result, agg_expected)
+     tm.assert_series_equal(trans_result, trans_expected)
+
+     agg_result = ts_grouped.agg(f, q=80)
+     apply_result = ts_grouped.apply(f, q=80)
+     trans_result = ts_grouped.transform(f, q=80)
+     tm.assert_series_equal(agg_result, agg_expected)
+     tm.assert_series_equal(apply_result, agg_expected)
+     tm.assert_series_equal(trans_result, trans_expected)
+
+     # DataFrame
+     for as_index in [True, False]:
+         df_grouped = tsframe.groupby(lambda x: x.month, as_index=as_index)
+         agg_result = df_grouped.agg(np.percentile, 80, axis=0)
+         apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
+         expected = df_grouped.quantile(0.8)
+         tm.assert_frame_equal(apply_result, expected, check_names=False)
+         tm.assert_frame_equal(agg_result, expected)
+
+         apply_result = df_grouped.apply(DataFrame.quantile, [0.4, 0.8])
+         expected_seq = df_grouped.quantile([0.4, 0.8])
+         tm.assert_frame_equal(apply_result, expected_seq, check_names=False)
+
+         agg_result = df_grouped.agg(f, q=80)
+         apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
+         tm.assert_frame_equal(agg_result, expected)
+         tm.assert_frame_equal(apply_result, expected, check_names=False)
+
+
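# Sketch of the argument pass-through checked above (illustrative; toy data):
# extra positional/keyword arguments given to agg/apply/transform are
# forwarded unchanged to the function applied to each group.
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3, 4], index=[0, 0, 1, 1])
out = s.groupby(level=0).agg(np.percentile, q=50)  # q is forwarded
print(out)  # per-group medians: 1.5 and 3.5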
+ @pytest.mark.parametrize("as_index", [True, False])
+ def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
+     # go through _aggregate_frame with self.axis == 0 and duplicate columns
+     tsframe.columns = ["A", "B", "A", "C"]
+     gb = tsframe.groupby(lambda x: x.month, as_index=as_index)
+
+     res = gb.agg(np.percentile, 80, axis=0)
+
+     ex_data = {
+         1: tsframe[tsframe.index.month == 1].quantile(0.8),
+         2: tsframe[tsframe.index.month == 2].quantile(0.8),
+     }
+     expected = DataFrame(ex_data).T
+     expected.index = expected.index.astype(np.int32)
+     if not as_index:
+         # TODO: try to get this more consistent?
+         expected.index = Index(range(2))
+
+     tm.assert_frame_equal(res, expected)
+
+
+ def test_len():
+     df = tm.makeTimeDataFrame()
+     grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
+     assert len(grouped) == len(df)
+
+     grouped = df.groupby([lambda x: x.year, lambda x: x.month])
+     expected = len({(x.year, x.month) for x in df.index})
+     assert len(grouped) == expected
+
+     # issue 11016
+     df = DataFrame({"a": [np.nan] * 3, "b": [1, 2, 3]})
+     assert len(df.groupby("a")) == 0
+     assert len(df.groupby("b")) == 3
+     assert len(df.groupby(["a", "b"])) == 3
+
+
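# Sketch of the len() semantics asserted above (illustrative; toy data):
# len(gb) counts groups, and rows whose key is NaN are dropped by default.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"a": [np.nan, np.nan], "b": [1, 2]})
assert len(toy.groupby("a")) == 0  # all keys NaN -> no groups
assert len(toy.groupby("b")) == 2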
282
+ def test_basic_regression():
283
+ # regression
284
+ result = Series([1.0 * x for x in list(range(1, 10)) * 10])
285
+
286
+ data = np.random.random(1100) * 10.0
287
+ groupings = Series(data)
288
+
289
+ grouped = result.groupby(groupings)
290
+ grouped.mean()
291
+
292
+
293
+ @pytest.mark.parametrize(
294
+ "dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
295
+ )
296
+ def test_with_na_groups(dtype):
297
+ index = Index(np.arange(10))
298
+ values = Series(np.ones(10), index, dtype=dtype)
299
+ labels = Series(
300
+ [np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
301
+ index=index,
302
+ )
303
+
304
+ # this SHOULD be an int
305
+ grouped = values.groupby(labels)
306
+ agged = grouped.agg(len)
307
+ expected = Series([4, 2], index=["bar", "foo"])
308
+
309
+ tm.assert_series_equal(agged, expected, check_dtype=False)
310
+
311
+ # assert issubclass(agged.dtype.type, np.integer)
312
+
313
+ # explicitly return a float from my function
314
+ def f(x):
315
+ return float(len(x))
316
+
317
+ agged = grouped.agg(f)
318
+ expected = Series([4.0, 2.0], index=["bar", "foo"])
319
+
320
+ tm.assert_series_equal(agged, expected)
321
+
322
+
323
+ def test_indices_concatenation_order():
324
+ # GH 2808
325
+
326
+ def f1(x):
327
+ y = x[(x.b % 2) == 1] ** 2
328
+ if y.empty:
329
+ multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
330
+ res = DataFrame(columns=["a"], index=multiindex)
331
+ return res
332
+ else:
333
+ y = y.set_index(["b", "c"])
334
+ return y
335
+
336
+ def f2(x):
337
+ y = x[(x.b % 2) == 1] ** 2
338
+ if y.empty:
339
+ return DataFrame()
340
+ else:
341
+ y = y.set_index(["b", "c"])
342
+ return y
343
+
344
+ def f3(x):
345
+ y = x[(x.b % 2) == 1] ** 2
346
+ if y.empty:
347
+ multiindex = MultiIndex(
348
+ levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
349
+ )
350
+ res = DataFrame(columns=["a", "b"], index=multiindex)
351
+ return res
352
+ else:
353
+ return y
354
+
355
+ df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
356
+
357
+ df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
358
+
359
+ # correct result
360
+ result1 = df.groupby("a").apply(f1)
361
+ result2 = df2.groupby("a").apply(f1)
362
+ tm.assert_frame_equal(result1, result2)
363
+
364
+ # should fail (not the same number of levels)
365
+ msg = "Cannot concat indices that do not have the same number of levels"
366
+ with pytest.raises(AssertionError, match=msg):
367
+ df.groupby("a").apply(f2)
368
+ with pytest.raises(AssertionError, match=msg):
369
+ df2.groupby("a").apply(f2)
370
+
371
+ # should fail (incorrect shape)
372
+ with pytest.raises(AssertionError, match=msg):
373
+ df.groupby("a").apply(f3)
374
+ with pytest.raises(AssertionError, match=msg):
375
+ df2.groupby("a").apply(f3)
376
+
377
+
378
+ def test_attr_wrapper(ts):
379
+ grouped = ts.groupby(lambda x: x.weekday())
380
+
381
+ result = grouped.std()
382
+ expected = grouped.agg(lambda x: np.std(x, ddof=1))
383
+ tm.assert_series_equal(result, expected)
384
+
385
+ # this is pretty cool
386
+ result = grouped.describe()
387
+ expected = {name: gp.describe() for name, gp in grouped}
388
+ expected = DataFrame(expected).T
389
+ tm.assert_frame_equal(result, expected)
390
+
391
+ # get attribute
392
+ result = grouped.dtype
393
+ expected = grouped.agg(lambda x: x.dtype)
394
+ tm.assert_series_equal(result, expected)
395
+
396
+ # make sure raises error
397
+ msg = "'SeriesGroupBy' object has no attribute 'foo'"
398
+ with pytest.raises(AttributeError, match=msg):
399
+ getattr(grouped, "foo")
400
+
401
+
402
+ def test_frame_groupby(tsframe):
403
+ grouped = tsframe.groupby(lambda x: x.weekday())
404
+
405
+ # aggregate
406
+ aggregated = grouped.aggregate(np.mean)
407
+ assert len(aggregated) == 5
408
+ assert len(aggregated.columns) == 4
409
+
410
+ # by string
411
+ tscopy = tsframe.copy()
412
+ tscopy["weekday"] = [x.weekday() for x in tscopy.index]
413
+ stragged = tscopy.groupby("weekday").aggregate(np.mean)
414
+ tm.assert_frame_equal(stragged, aggregated, check_names=False)
415
+
416
+ # transform
417
+ grouped = tsframe.head(30).groupby(lambda x: x.weekday())
418
+ transformed = grouped.transform(lambda x: x - x.mean())
419
+ assert len(transformed) == 30
420
+ assert len(transformed.columns) == 4
421
+
422
+ # transform propagate
423
+ transformed = grouped.transform(lambda x: x.mean())
424
+ for name, group in grouped:
425
+ mean = group.mean()
426
+ for idx in group.index:
427
+ tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
428
+
429
+ # iterate
430
+ for weekday, group in grouped:
431
+ assert group.index[0].weekday() == weekday
432
+
433
+ # groups / group_indices
434
+ groups = grouped.groups
435
+ indices = grouped.indices
436
+
437
+ for k, v in groups.items():
438
+ samething = tsframe.index.take(indices[k])
439
+ assert (samething == v).all()
440
+
441
+
442
+ def test_frame_groupby_columns(tsframe):
443
+ mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
444
+ grouped = tsframe.groupby(mapping, axis=1)
445
+
446
+ # aggregate
447
+ aggregated = grouped.aggregate(np.mean)
448
+ assert len(aggregated) == len(tsframe)
449
+ assert len(aggregated.columns) == 2
450
+
451
+ # transform
452
+ tf = lambda x: x - x.mean()
453
+ groupedT = tsframe.T.groupby(mapping, axis=0)
454
+ tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
455
+
456
+ # iterate
457
+ for k, v in grouped:
458
+ assert len(v.columns) == 2
459
+
460
+
461
+ def test_frame_set_name_single(df):
462
+ grouped = df.groupby("A")
463
+
464
+ result = grouped.mean(numeric_only=True)
465
+ assert result.index.name == "A"
466
+
467
+ result = df.groupby("A", as_index=False).mean(numeric_only=True)
468
+ assert result.index.name != "A"
469
+
470
+ result = grouped[["C", "D"]].agg(np.mean)
471
+ assert result.index.name == "A"
472
+
473
+ result = grouped.agg({"C": np.mean, "D": np.std})
474
+ assert result.index.name == "A"
475
+
476
+ result = grouped["C"].mean()
477
+ assert result.index.name == "A"
478
+ result = grouped["C"].agg(np.mean)
479
+ assert result.index.name == "A"
480
+ result = grouped["C"].agg([np.mean, np.std])
481
+ assert result.index.name == "A"
482
+
483
+ msg = r"nested renamer is not supported"
484
+ with pytest.raises(SpecificationError, match=msg):
485
+ grouped["C"].agg({"foo": np.mean, "bar": np.std})
486
+
487
+
488
+ def test_multi_func(df):
489
+ col1 = df["A"]
490
+ col2 = df["B"]
491
+
492
+ grouped = df.groupby([col1.get, col2.get])
493
+ agged = grouped.mean(numeric_only=True)
494
+ expected = df.groupby(["A", "B"]).mean()
495
+
496
+ # TODO groupby get drops names
497
+ tm.assert_frame_equal(
498
+ agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
499
+ )
500
+
501
+ # some "groups" with no data
502
+ df = DataFrame(
503
+ {
504
+ "v1": np.random.randn(6),
505
+ "v2": np.random.randn(6),
506
+ "k1": np.array(["b", "b", "b", "a", "a", "a"]),
507
+ "k2": np.array(["1", "1", "1", "2", "2", "2"]),
508
+ },
509
+ index=["one", "two", "three", "four", "five", "six"],
510
+ )
511
+ # only verify that it works for now
512
+ grouped = df.groupby(["k1", "k2"])
513
+ grouped.agg(np.sum)
514
+
515
+
516
+ def test_multi_key_multiple_functions(df):
517
+ grouped = df.groupby(["A", "B"])["C"]
518
+
519
+ agged = grouped.agg([np.mean, np.std])
520
+ expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
521
+ tm.assert_frame_equal(agged, expected)
522
+
523
+
524
+ def test_frame_multi_key_function_list():
525
+ data = DataFrame(
526
+ {
527
+ "A": [
528
+ "foo",
529
+ "foo",
530
+ "foo",
531
+ "foo",
532
+ "bar",
533
+ "bar",
534
+ "bar",
535
+ "bar",
536
+ "foo",
537
+ "foo",
538
+ "foo",
539
+ ],
540
+ "B": [
541
+ "one",
542
+ "one",
543
+ "one",
544
+ "two",
545
+ "one",
546
+ "one",
547
+ "one",
548
+ "two",
549
+ "two",
550
+ "two",
551
+ "one",
552
+ ],
553
+ "D": np.random.randn(11),
554
+ "E": np.random.randn(11),
555
+ "F": np.random.randn(11),
556
+ }
557
+ )
558
+
559
+ grouped = data.groupby(["A", "B"])
560
+ funcs = [np.mean, np.std]
561
+ agged = grouped.agg(funcs)
562
+ expected = pd.concat(
563
+ [grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
564
+ keys=["D", "E", "F"],
565
+ axis=1,
566
+ )
567
+ assert isinstance(agged.index, MultiIndex)
568
+ assert isinstance(expected.index, MultiIndex)
569
+ tm.assert_frame_equal(agged, expected)
570
+
571
+
572
+ def test_frame_multi_key_function_list_partial_failure():
573
+ data = DataFrame(
574
+ {
575
+ "A": [
576
+ "foo",
577
+ "foo",
578
+ "foo",
579
+ "foo",
580
+ "bar",
581
+ "bar",
582
+ "bar",
583
+ "bar",
584
+ "foo",
585
+ "foo",
586
+ "foo",
587
+ ],
588
+ "B": [
589
+ "one",
590
+ "one",
591
+ "one",
592
+ "two",
593
+ "one",
594
+ "one",
595
+ "one",
596
+ "two",
597
+ "two",
598
+ "two",
599
+ "one",
600
+ ],
601
+ "C": [
602
+ "dull",
603
+ "dull",
604
+ "shiny",
605
+ "dull",
606
+ "dull",
607
+ "shiny",
608
+ "shiny",
609
+ "dull",
610
+ "shiny",
611
+ "shiny",
612
+ "shiny",
613
+ ],
614
+ "D": np.random.randn(11),
615
+ "E": np.random.randn(11),
616
+ "F": np.random.randn(11),
617
+ }
618
+ )
619
+
620
+ grouped = data.groupby(["A", "B"])
621
+ funcs = [np.mean, np.std]
622
+ with pytest.raises(TypeError, match="Could not convert dullshinyshiny to numeric"):
623
+ grouped.agg(funcs)
624
+
625
+
626
+ @pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
627
+ def test_groupby_multiple_columns(df, op):
628
+ data = df
629
+ grouped = data.groupby(["A", "B"])
630
+
631
+ result1 = op(grouped)
632
+
633
+ keys = []
634
+ values = []
635
+ for n1, gp1 in data.groupby("A"):
636
+ for n2, gp2 in gp1.groupby("B"):
637
+ keys.append((n1, n2))
638
+ values.append(op(gp2.loc[:, ["C", "D"]]))
639
+
640
+ mi = MultiIndex.from_tuples(keys, names=["A", "B"])
641
+ expected = pd.concat(values, axis=1).T
642
+ expected.index = mi
643
+
644
+ # a little bit crude
645
+ for col in ["C", "D"]:
646
+ result_col = op(grouped[col])
647
+ pivoted = result1[col]
648
+ exp = expected[col]
649
+ tm.assert_series_equal(result_col, exp)
650
+ tm.assert_series_equal(pivoted, exp)
651
+
652
+ # test single series works the same
653
+ result = data["C"].groupby([data["A"], data["B"]]).mean()
654
+ expected = data.groupby(["A", "B"]).mean()["C"]
655
+
656
+ tm.assert_series_equal(result, expected)
657
+
658
+
659
+ def test_as_index_select_column():
660
+ # GH 5764
661
+ df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
662
+ result = df.groupby("A", as_index=False)["B"].get_group(1)
663
+ expected = Series([2, 4], name="B")
664
+ tm.assert_series_equal(result, expected)
665
+
666
+ result = df.groupby("A", as_index=False, group_keys=True)["B"].apply(
667
+ lambda x: x.cumsum()
668
+ )
669
+ expected = Series(
670
+ [2, 6, 6], name="B", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
671
+ )
672
+ tm.assert_series_equal(result, expected)
673
+
674
+
675
+ def test_groupby_as_index_select_column_sum_empty_df():
676
+ # GH 35246
677
+ df = DataFrame(columns=Index(["A", "B", "C"], name="alpha"))
678
+ left = df.groupby(by="A", as_index=False)["B"].sum(numeric_only=False)
679
+
680
+ expected = DataFrame(columns=df.columns[:2], index=range(0))
681
+ # GH#50744 - Columns after selection shouldn't retain names
682
+ expected.columns.names = [None]
683
+ tm.assert_frame_equal(left, expected)
684
+
685
+
686
+ def test_groupby_as_index_agg(df):
687
+ grouped = df.groupby("A", as_index=False)
688
+
689
+ # single-key
690
+
691
+ result = grouped[["C", "D"]].agg(np.mean)
692
+ expected = grouped.mean(numeric_only=True)
693
+ tm.assert_frame_equal(result, expected)
694
+
695
+ result2 = grouped.agg({"C": np.mean, "D": np.sum})
696
+ expected2 = grouped.mean(numeric_only=True)
697
+ expected2["D"] = grouped.sum()["D"]
698
+ tm.assert_frame_equal(result2, expected2)
699
+
700
+ grouped = df.groupby("A", as_index=True)
701
+
702
+ msg = r"nested renamer is not supported"
703
+ with pytest.raises(SpecificationError, match=msg):
704
+ grouped["C"].agg({"Q": np.sum})
705
+
706
+ # multi-key
707
+
708
+ grouped = df.groupby(["A", "B"], as_index=False)
709
+
710
+ result = grouped.agg(np.mean)
711
+ expected = grouped.mean()
712
+ tm.assert_frame_equal(result, expected)
713
+
714
+ result2 = grouped.agg({"C": np.mean, "D": np.sum})
715
+ expected2 = grouped.mean()
716
+ expected2["D"] = grouped.sum()["D"]
717
+ tm.assert_frame_equal(result2, expected2)
718
+
719
+ expected3 = grouped["C"].sum()
720
+ expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
721
+ result3 = grouped["C"].agg({"Q": np.sum})
722
+ tm.assert_frame_equal(result3, expected3)
723
+
724
+ # GH7115 & GH8112 & GH8582
725
+ df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
726
+ ts = Series(np.random.randint(5, 10, 50), name="jim")
727
+
728
+ gr = df.groupby(ts)
729
+ gr.nth(0) # invokes set_selection_from_grouper internally
730
+ tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
731
+
732
+ for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
733
+ gr = df.groupby(ts, as_index=False)
734
+ left = getattr(gr, attr)()
735
+
736
+ gr = df.groupby(ts.values, as_index=True)
737
+ right = getattr(gr, attr)().reset_index(drop=True)
738
+
739
+ tm.assert_frame_equal(left, right)
740
+
741
+
742
+ def test_ops_not_as_index(reduction_func):
743
+ # GH 10355, 21090
744
+ # Using as_index=False should not modify grouped column
745
+
746
+ if reduction_func in ("corrwith", "nth", "ngroup"):
747
+ pytest.skip(f"GH 5755: Test not applicable for {reduction_func}")
748
+
749
+ df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
750
+ expected = getattr(df.groupby("a"), reduction_func)()
751
+ if reduction_func == "size":
752
+ expected = expected.rename("size")
753
+ expected = expected.reset_index()
754
+
755
+ if reduction_func != "size":
756
+ # 32 bit compat -> groupby preserves dtype whereas reset_index casts to int64
757
+ expected["a"] = expected["a"].astype(df["a"].dtype)
758
+
759
+ g = df.groupby("a", as_index=False)
760
+
761
+ result = getattr(g, reduction_func)()
762
+ tm.assert_frame_equal(result, expected)
763
+
764
+ result = g.agg(reduction_func)
765
+ tm.assert_frame_equal(result, expected)
766
+
767
+ result = getattr(g["b"], reduction_func)()
768
+ tm.assert_frame_equal(result, expected)
769
+
770
+ result = g["b"].agg(reduction_func)
771
+ tm.assert_frame_equal(result, expected)
772
+
773
+
774
+ def test_as_index_series_return_frame(df):
775
+ grouped = df.groupby("A", as_index=False)
776
+ grouped2 = df.groupby(["A", "B"], as_index=False)
777
+
778
+ result = grouped["C"].agg(np.sum)
779
+ expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
780
+ assert isinstance(result, DataFrame)
781
+ tm.assert_frame_equal(result, expected)
782
+
783
+ result2 = grouped2["C"].agg(np.sum)
784
+ expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
785
+ assert isinstance(result2, DataFrame)
786
+ tm.assert_frame_equal(result2, expected2)
787
+
788
+ result = grouped["C"].sum()
789
+ expected = grouped.sum().loc[:, ["A", "C"]]
790
+ assert isinstance(result, DataFrame)
791
+ tm.assert_frame_equal(result, expected)
792
+
793
+ result2 = grouped2["C"].sum()
794
+ expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
795
+ assert isinstance(result2, DataFrame)
796
+ tm.assert_frame_equal(result2, expected2)
797
+
798
+
799
+ def test_as_index_series_column_slice_raises(df):
800
+ # GH15072
801
+ grouped = df.groupby("A", as_index=False)
802
+ msg = r"Column\(s\) C already selected"
803
+
804
+ with pytest.raises(IndexError, match=msg):
805
+ grouped["C"].__getitem__("D")
806
+
807
+
808
+ def test_groupby_as_index_cython(df):
809
+ data = df
810
+
811
+ # single-key
812
+ grouped = data.groupby("A", as_index=False)
813
+ result = grouped.mean(numeric_only=True)
814
+ expected = data.groupby(["A"]).mean(numeric_only=True)
815
+ expected.insert(0, "A", expected.index)
816
+ expected.index = RangeIndex(len(expected))
817
+ tm.assert_frame_equal(result, expected)
818
+
819
+ # multi-key
820
+ grouped = data.groupby(["A", "B"], as_index=False)
821
+ result = grouped.mean()
822
+ expected = data.groupby(["A", "B"]).mean()
823
+
824
+ arrays = list(zip(*expected.index.values))
825
+ expected.insert(0, "A", arrays[0])
826
+ expected.insert(1, "B", arrays[1])
827
+ expected.index = RangeIndex(len(expected))
828
+ tm.assert_frame_equal(result, expected)
829
+
830
+
831
+ def test_groupby_as_index_series_scalar(df):
832
+ grouped = df.groupby(["A", "B"], as_index=False)
833
+
834
+ # GH #421
835
+
836
+ result = grouped["C"].agg(len)
837
+ expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
838
+ tm.assert_frame_equal(result, expected)
839
+
840
+
841
+ def test_groupby_as_index_corner(df, ts):
842
+ msg = "as_index=False only valid with DataFrame"
843
+ with pytest.raises(TypeError, match=msg):
844
+ ts.groupby(lambda x: x.weekday(), as_index=False)
845
+
846
+ msg = "as_index=False only valid for axis=0"
847
+ with pytest.raises(ValueError, match=msg):
848
+ df.groupby(lambda x: x.lower(), as_index=False, axis=1)
849
+
850
+
851
+ def test_groupby_multiple_key():
852
+ df = tm.makeTimeDataFrame()
853
+ grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
854
+ agged = grouped.sum()
855
+ tm.assert_almost_equal(df.values, agged.values)
856
+
857
+ grouped = df.T.groupby(
858
+ [lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
859
+ )
860
+
861
+ agged = grouped.agg(lambda x: x.sum())
862
+ tm.assert_index_equal(agged.index, df.columns)
863
+ tm.assert_almost_equal(df.T.values, agged.values)
864
+
865
+ agged = grouped.agg(lambda x: x.sum())
866
+ tm.assert_almost_equal(df.T.values, agged.values)
867
+
868
+
869
+ def test_groupby_multi_corner(df):
870
+ # test that having an all-NA column doesn't mess you up
871
+ df = df.copy()
872
+ df["bad"] = np.nan
873
+ agged = df.groupby(["A", "B"]).mean()
874
+
875
+ expected = df.groupby(["A", "B"]).mean()
876
+ expected["bad"] = np.nan
877
+
878
+ tm.assert_frame_equal(agged, expected)
879
+
880
+
881
+ def test_raises_on_nuisance(df):
882
+ grouped = df.groupby("A")
883
+ with pytest.raises(TypeError, match="Could not convert"):
884
+ grouped.agg(np.mean)
885
+ with pytest.raises(TypeError, match="Could not convert"):
886
+ grouped.mean()
887
+
888
+ df = df.loc[:, ["A", "C", "D"]]
889
+ df["E"] = datetime.now()
890
+ grouped = df.groupby("A")
891
+ msg = "datetime64 type does not support sum operations"
892
+ with pytest.raises(TypeError, match=msg):
893
+ grouped.agg(np.sum)
894
+ with pytest.raises(TypeError, match=msg):
895
+ grouped.sum()
896
+
897
+ # won't work with axis = 1
898
+ grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
899
+ msg = "does not support reduction 'sum'"
900
+ with pytest.raises(TypeError, match=msg):
901
+ grouped.agg(lambda x: x.sum(0, numeric_only=False))
902
+
903
+
904
+ @pytest.mark.parametrize(
905
+ "agg_function",
906
+ ["max", "min"],
907
+ )
908
+ def test_keep_nuisance_agg(df, agg_function):
909
+ # GH 38815
910
+ grouped = df.groupby("A")
911
+ result = getattr(grouped, agg_function)()
912
+ expected = result.copy()
913
+ expected.loc["bar", "B"] = getattr(df.loc[df["A"] == "bar", "B"], agg_function)()
914
+ expected.loc["foo", "B"] = getattr(df.loc[df["A"] == "foo", "B"], agg_function)()
915
+ tm.assert_frame_equal(result, expected)
916
+
917
+
918
+ @pytest.mark.parametrize(
919
+ "agg_function",
920
+ ["sum", "mean", "prod", "std", "var", "sem", "median"],
921
+ )
922
+ @pytest.mark.parametrize("numeric_only", [True, False])
923
+ def test_omit_nuisance_agg(df, agg_function, numeric_only):
924
+ # GH 38774, GH 38815
925
+ grouped = df.groupby("A")
926
+
927
+ no_drop_nuisance = ("var", "std", "sem", "mean", "prod", "median")
928
+ if agg_function in no_drop_nuisance and not numeric_only:
929
+ # Added numeric_only as part of GH#46560; these do not drop nuisance
930
+ # columns when numeric_only is False
931
+ klass = ValueError if agg_function in ("std", "sem") else TypeError
932
+ msg = "|".join(["[C|c]ould not convert", "can't multiply sequence"])
933
+ with pytest.raises(klass, match=msg):
934
+ getattr(grouped, agg_function)(numeric_only=numeric_only)
935
+ else:
936
+ result = getattr(grouped, agg_function)(numeric_only=numeric_only)
937
+ if not numeric_only and agg_function == "sum":
938
+ # sum is successful on column B
939
+ columns = ["A", "B", "C", "D"]
940
+ else:
941
+ columns = ["A", "C", "D"]
942
+ expected = getattr(df.loc[:, columns].groupby("A"), agg_function)(
943
+ numeric_only=numeric_only
944
+ )
945
+ tm.assert_frame_equal(result, expected)
946
+
947
+
948
+ def test_raise_on_nuisance_python_single(df):
949
+ # GH 38815
950
+ grouped = df.groupby("A")
951
+ with pytest.raises(TypeError, match="could not convert"):
952
+ grouped.skew()
953
+
954
+
955
+ def test_raise_on_nuisance_python_multiple(three_group):
956
+ grouped = three_group.groupby(["A", "B"])
957
+ with pytest.raises(TypeError, match="Could not convert"):
958
+ grouped.agg(np.mean)
959
+ with pytest.raises(TypeError, match="Could not convert"):
960
+ grouped.mean()
961
+
962
+
963
+ def test_empty_groups_corner(mframe):
964
+ # handle empty groups
965
+ df = DataFrame(
966
+ {
967
+ "k1": np.array(["b", "b", "b", "a", "a", "a"]),
968
+ "k2": np.array(["1", "1", "1", "2", "2", "2"]),
969
+ "k3": ["foo", "bar"] * 3,
970
+ "v1": np.random.randn(6),
971
+ "v2": np.random.randn(6),
972
+ }
973
+ )
974
+
975
+ grouped = df.groupby(["k1", "k2"])
976
+ result = grouped[["v1", "v2"]].agg(np.mean)
977
+ expected = grouped.mean(numeric_only=True)
978
+ tm.assert_frame_equal(result, expected)
979
+
980
+ grouped = mframe[3:5].groupby(level=0)
981
+ agged = grouped.apply(lambda x: x.mean())
982
+ agged_A = grouped["A"].apply(np.mean)
983
+ tm.assert_series_equal(agged["A"], agged_A)
984
+ assert agged.index.name == "first"
985
+
986
+
987
+ def test_nonsense_func():
+     df = DataFrame([0])
+     msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
+     with pytest.raises(TypeError, match=msg):
+         df.groupby(lambda x: x + "foo")
+
+
+ def test_wrap_aggregated_output_multindex(mframe):
+     df = mframe.T
+     df["baz", "two"] = "peekaboo"
+
+     keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
+     with pytest.raises(TypeError, match="Could not convert"):
+         df.groupby(keys).agg(np.mean)
+     agged = df.drop(columns=("baz", "two")).groupby(keys).agg(np.mean)
+     assert isinstance(agged.columns, MultiIndex)
+
+     def aggfun(ser):
+         if ser.name == ("foo", "one"):
+             raise TypeError("Test error message")
+         return ser.sum()
+
+     with pytest.raises(TypeError, match="Test error message"):
+         df.groupby(keys).aggregate(aggfun)
+
+
+ def test_groupby_level_apply(mframe):
+     result = mframe.groupby(level=0).count()
+     assert result.index.name == "first"
+     result = mframe.groupby(level=1).count()
+     assert result.index.name == "second"
+
+     result = mframe["A"].groupby(level=0).count()
+     assert result.index.name == "first"
+
+
+ def test_groupby_level_mapper(mframe):
+     deleveled = mframe.reset_index()
+
+     mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
+     mapper1 = {"one": 0, "two": 0, "three": 1}
+
+     result0 = mframe.groupby(mapper0, level=0).sum()
+     result1 = mframe.groupby(mapper1, level=1).sum()
+
+     mapped_level0 = np.array(
+         [mapper0.get(x) for x in deleveled["first"]], dtype=np.int64
+     )
+     mapped_level1 = np.array(
+         [mapper1.get(x) for x in deleveled["second"]], dtype=np.int64
+     )
+     expected0 = mframe.groupby(mapped_level0).sum()
+     expected1 = mframe.groupby(mapped_level1).sum()
+     expected0.index.name, expected1.index.name = "first", "second"
+
+     tm.assert_frame_equal(result0, expected0)
+     tm.assert_frame_equal(result1, expected1)
+
+
+ def test_groupby_level_nonmulti():
+     # GH 1313, GH 13901
+     s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
+     expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
+
+     result = s.groupby(level=0).sum()
+     tm.assert_series_equal(result, expected)
+     result = s.groupby(level=[0]).sum()
+     tm.assert_series_equal(result, expected)
+     result = s.groupby(level=-1).sum()
+     tm.assert_series_equal(result, expected)
+     result = s.groupby(level=[-1]).sum()
+     tm.assert_series_equal(result, expected)
+
+     msg = "level > 0 or level < -1 only valid with MultiIndex"
+     with pytest.raises(ValueError, match=msg):
+         s.groupby(level=1)
+     with pytest.raises(ValueError, match=msg):
+         s.groupby(level=-2)
+     msg = "No group keys passed!"
+     with pytest.raises(ValueError, match=msg):
+         s.groupby(level=[])
+     msg = "multiple levels only valid with MultiIndex"
+     with pytest.raises(ValueError, match=msg):
+         s.groupby(level=[0, 0])
+     with pytest.raises(ValueError, match=msg):
+         s.groupby(level=[0, 1])
+     msg = "level > 0 or level < -1 only valid with MultiIndex"
+     with pytest.raises(ValueError, match=msg):
+         s.groupby(level=[1])
+
+
+ def test_groupby_complex():
+     # GH 12902
+     a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
+     expected = Series((1 + 2j, 5 + 10j))
+
+     result = a.groupby(level=0).sum()
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_complex_numbers():
+     # GH 17927
+     df = DataFrame(
+         [
+             {"a": 1, "b": 1 + 1j},
+             {"a": 1, "b": 1 + 2j},
+             {"a": 4, "b": 1},
+         ]
+     )
+     expected = DataFrame(
+         np.array([1, 1, 1], dtype=np.int64),
+         index=Index([(1 + 1j), (1 + 2j), (1 + 0j)], name="b"),
+         columns=Index(["a"], dtype="object"),
+     )
+     result = df.groupby("b", sort=False).count()
+     tm.assert_frame_equal(result, expected)
+
+     # Sorted by the magnitude of the complex numbers
+     expected.index = Index([(1 + 0j), (1 + 1j), (1 + 2j)], name="b")
+     result = df.groupby("b", sort=True).count()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_series_indexed_differently():
+     s1 = Series(
+         [5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
+         index=Index(["a", "b", "c", "d", "e", "f", "g"]),
+     )
+     s2 = Series(
+         [1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
+     )
+
+     grouped = s1.groupby(s2)
+     agged = grouped.mean()
+     exp = s1.groupby(s2.reindex(s1.index).get).mean()
+     tm.assert_series_equal(agged, exp)
+
+
+ def test_groupby_with_hier_columns():
+     tuples = list(
+         zip(
+             *[
+                 ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+                 ["one", "two", "one", "two", "one", "two", "one", "two"],
+             ]
+         )
+     )
+     index = MultiIndex.from_tuples(tuples)
+     columns = MultiIndex.from_tuples(
+         [("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
+     )
+     df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
+
+     result = df.groupby(level=0).mean()
+     tm.assert_index_equal(result.columns, columns)
+
+     result = df.groupby(level=0, axis=1).mean()
+     tm.assert_index_equal(result.index, df.index)
+
+     result = df.groupby(level=0).agg(np.mean)
+     tm.assert_index_equal(result.columns, columns)
+
+     result = df.groupby(level=0).apply(lambda x: x.mean())
+     tm.assert_index_equal(result.columns, columns)
+
+     result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
+     tm.assert_index_equal(result.columns, Index(["A", "B"]))
+     tm.assert_index_equal(result.index, df.index)
+
+     # add a nuisance column
+     sorted_columns, _ = columns.sortlevel(0)
+     df["A", "foo"] = "bar"
+     result = df.groupby(level=0).mean(numeric_only=True)
+     tm.assert_index_equal(result.columns, df.columns[:-1])
+
+
+ def test_grouping_ndarray(df):
+     grouped = df.groupby(df["A"].values)
+     result = grouped.sum()
+     expected = df.groupby(df["A"].rename(None)).sum()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_wrong_multi_labels():
+     index = Index([0, 1, 2, 3, 4], name="index")
+     data = DataFrame(
+         {
+             "foo": ["foo1", "foo1", "foo2", "foo1", "foo3"],
+             "bar": ["bar1", "bar2", "bar2", "bar1", "bar1"],
+             "baz": ["baz1", "baz1", "baz1", "baz2", "baz2"],
+             "spam": ["spam2", "spam3", "spam2", "spam1", "spam1"],
+             "data": [20, 30, 40, 50, 60],
+         },
+         index=index,
+     )
+
+     grouped = data.groupby(["foo", "bar", "baz", "spam"])
+
+     result = grouped.agg(np.mean)
+     expected = grouped.mean()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_series_with_name(df):
+     result = df.groupby(df["A"]).mean(numeric_only=True)
+     result2 = df.groupby(df["A"], as_index=False).mean(numeric_only=True)
+     assert result.index.name == "A"
+     assert "A" in result2
+
+     result = df.groupby([df["A"], df["B"]]).mean()
+     result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
+     assert result.index.names == ("A", "B")
+     assert "A" in result2
+     assert "B" in result2
+
+
+ def test_seriesgroupby_name_attr(df):
+     # GH 6265
+     result = df.groupby("A")["C"]
+     assert result.count().name == "C"
+     assert result.mean().name == "C"
+
+     testFunc = lambda x: np.sum(x) * 2
+     assert result.agg(testFunc).name == "C"
+
+
+ def test_consistency_name():
+     # GH 12363
+
+     df = DataFrame(
+         {
+             "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+             "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
+             "C": np.random.randn(8) + 1.0,
+             "D": np.arange(8),
+         }
+     )
+
+     expected = df.groupby(["A"]).B.count()
+     result = df.B.groupby(df.A).count()
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_name_propagation(df):
+     # GH 6124
+     def summarize(df, name=None):
+         return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
+
+     def summarize_random_name(df):
+         # Provide a different name for each Series. In this case, groupby
+         # should not attempt to propagate the Series name since they are
+         # inconsistent.
+         return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
+
+     metrics = df.groupby("A").apply(summarize)
+     assert metrics.columns.name is None
+     metrics = df.groupby("A").apply(summarize, "metrics")
+     assert metrics.columns.name == "metrics"
+     metrics = df.groupby("A").apply(summarize_random_name)
+     assert metrics.columns.name is None
+
+
+ def test_groupby_nonstring_columns():
+     df = DataFrame([np.arange(10) for x in range(10)])
+     grouped = df.groupby(0)
+     result = grouped.mean()
+     expected = df.groupby(df[0]).mean()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_mixed_type_columns():
+     # GH 13432, unorderable types in py3
+     df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
+     expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
+
+     result = df.groupby("A").first()
+     tm.assert_frame_equal(result, expected)
+
+     result = df.groupby("A").sum()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_cython_grouper_series_bug_noncontig():
+     arr = np.empty((100, 100))
+     arr.fill(np.nan)
+     obj = Series(arr[:, 0])
+     inds = np.tile(range(10), 10)
+
+     result = obj.groupby(inds).agg(Series.median)
+     assert result.isna().all()
+
+
+ def test_series_grouper_noncontig_index():
+     index = Index(tm.rands_array(10, 100))
+
+     values = Series(np.random.randn(50), index=index[::2])
+     labels = np.random.randint(0, 5, 50)
+
+     # it works!
+     grouped = values.groupby(labels)
+
+     # accessing the index elements causes segfault
+     f = lambda x: len(set(map(id, x.index)))
+     grouped.agg(f)
+
+
+ def test_convert_objects_leave_decimal_alone():
+     s = Series(range(5))
+     labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
+
+     def convert_fast(x):
+         return Decimal(str(x.mean()))
+
+     def convert_force_pure(x):
+         # base will be length 0
+         assert len(x.values.base) > 0
+         return Decimal(str(x.mean()))
+
+     grouped = s.groupby(labels)
+
+     result = grouped.agg(convert_fast)
+     assert result.dtype == np.object_
+     assert isinstance(result[0], Decimal)
+
+     result = grouped.agg(convert_force_pure)
+     assert result.dtype == np.object_
+     assert isinstance(result[0], Decimal)
+
+
+ def test_groupby_dtype_inference_empty():
+     # GH 6733
+     df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
+     assert df["x"].dtype == np.float64
+
+     result = df.groupby("x").first()
+     exp_index = Index([], name="x", dtype=np.float64)
+     expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
+     tm.assert_frame_equal(result, expected, by_blocks=True)
+
+
+ def test_groupby_unit64_float_conversion():
+     # GH 30859: groupby converts uint64 to floats sometimes
+     df = DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]})
+     result = df.groupby(["first", "second"])["value"].max()
+     expected = Series(
+         [16148277970000000000],
+         MultiIndex.from_product([[1], [1]], names=["first", "second"]),
+         name="value",
+     )
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_list_infer_array_like(df):
+     result = df.groupby(list(df["A"])).mean(numeric_only=True)
+     expected = df.groupby(df["A"]).mean(numeric_only=True)
+     tm.assert_frame_equal(result, expected, check_names=False)
+
+     with pytest.raises(KeyError, match=r"^'foo'$"):
+         df.groupby(list(df["A"][:-1]))
+
+     # pathological case of ambiguity
+     df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
+
+     result = df.groupby(["foo", "bar"]).mean()
+     expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
+
+
+ def test_groupby_keys_same_size_as_index():
+     # GH 11185
+     freq = "s"
+     index = date_range(
+         start=Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
+     )
+     df = DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
+     result = df.groupby([Grouper(level=0, freq=freq), "metric"]).mean()
+     expected = df.set_index([df.index, "metric"]).astype(float)
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_one_row():
+     # GH 11741
+     msg = r"^'Z'$"
+     df1 = DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
+     with pytest.raises(KeyError, match=msg):
+         df1.groupby("Z")
+     df2 = DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
+     with pytest.raises(KeyError, match=msg):
+         df2.groupby("Z")
+
+
+ def test_groupby_nat_exclude():
+     # GH 6992
+     df = DataFrame(
+         {
+             "values": np.random.randn(8),
+             "dt": [
+                 np.nan,
+                 Timestamp("2013-01-01"),
+                 np.nan,
+                 Timestamp("2013-02-01"),
+                 np.nan,
+                 Timestamp("2013-02-01"),
+                 np.nan,
+                 Timestamp("2013-01-01"),
+             ],
+             "str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
+         }
+     )
+     grouped = df.groupby("dt")
+
+     expected = [Index([1, 7]), Index([3, 5])]
+     keys = sorted(grouped.groups.keys())
+     assert len(keys) == 2
+     for k, e in zip(keys, expected):
+         # grouped.groups keys are np.datetime64 with system tz
+         # not to be affected by tz, only compare values
+         tm.assert_index_equal(grouped.groups[k], e)
+
+     # confirm obj is not filtered
+     tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
+     assert grouped.ngroups == 2
+
+     expected = {
+         Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
+         Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
+     }
+
+     for k in grouped.indices:
+         tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
+
+     tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
+     tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
+
+     with pytest.raises(KeyError, match=r"^NaT$"):
+         grouped.get_group(pd.NaT)
+
+     nan_df = DataFrame(
+         {"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
+     )
+     assert nan_df["nan"].dtype == "float64"
+     assert nan_df["nat"].dtype == "datetime64[ns]"
+
+     for key in ["nan", "nat"]:
+         grouped = nan_df.groupby(key)
+         assert grouped.groups == {}
+         assert grouped.ngroups == 0
+         assert grouped.indices == {}
+         with pytest.raises(KeyError, match=r"^nan$"):
+             grouped.get_group(np.nan)
+         with pytest.raises(KeyError, match=r"^NaT$"):
+             grouped.get_group(pd.NaT)
+
+
+ def test_groupby_two_group_keys_all_nan():
+     # GH #36842: Grouping over two group keys shouldn't raise an error
+     df = DataFrame({"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 2]})
+     result = df.groupby(["a", "b"]).indices
+     assert result == {}
+
+
+ def test_groupby_2d_malformed():
+     d = DataFrame(index=range(2))
+     d["group"] = ["g1", "g2"]
+     d["zeros"] = [0, 0]
+     d["ones"] = [1, 1]
+     d["label"] = ["l1", "l2"]
+     tmp = d.groupby(["group"]).mean(numeric_only=True)
+     res_values = np.array([[0.0, 1.0], [0.0, 1.0]])
+     tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
+     tm.assert_numpy_array_equal(tmp.values, res_values)
+
+
+ def test_int32_overflow():
+     B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
+     A = np.arange(25000)
+     df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
+
+     left = df.groupby(["A", "B", "C", "D"]).sum()
+     right = df.groupby(["D", "C", "B", "A"]).sum()
+     assert len(left) == len(right)
+
+
+ def test_groupby_sort_multi():
+     df = DataFrame(
+         {
+             "a": ["foo", "bar", "baz"],
+             "b": [3, 2, 1],
+             "c": [0, 1, 2],
+             "d": np.random.randn(3),
+         }
+     )
+
+     tups = [tuple(row) for row in df[["a", "b", "c"]].values]
+     tups = com.asarray_tuplesafe(tups)
+     result = df.groupby(["a", "b", "c"], sort=True).sum()
+     tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
+
+     tups = [tuple(row) for row in df[["c", "a", "b"]].values]
+     tups = com.asarray_tuplesafe(tups)
+     result = df.groupby(["c", "a", "b"], sort=True).sum()
+     tm.assert_numpy_array_equal(result.index.values, tups)
+
+     tups = [tuple(x) for x in df[["b", "c", "a"]].values]
+     tups = com.asarray_tuplesafe(tups)
+     result = df.groupby(["b", "c", "a"], sort=True).sum()
+     tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
+
+     df = DataFrame(
+         {"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
+     )
+     grouped = df.groupby(["a", "b"])["d"]
+     result = grouped.sum()
+
+     def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
+         tups = [tuple(row) for row in df[keys].values]
+         tups = com.asarray_tuplesafe(tups)
+         expected = f(df.groupby(tups)[field])
+         for k, v in expected.items():
+             assert result[k] == v
+
+     _check_groupby(df, result, ["a", "b"], "d")
+
+
+ def test_dont_clobber_name_column():
+     df = DataFrame(
+         {"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
+     )
+
+     result = df.groupby("key", group_keys=False).apply(lambda x: x)
+     tm.assert_frame_equal(result, df)
+
+
+ def test_skip_group_keys():
+     tsf = tm.makeTimeDataFrame()
+
+     grouped = tsf.groupby(lambda x: x.month, group_keys=False)
+     result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
+
+     pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
+
+     expected = pd.concat(pieces)
+     tm.assert_frame_equal(result, expected)
+
+     grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
+     result = grouped.apply(lambda x: x.sort_values()[:3])
+
+     pieces = [group.sort_values()[:3] for key, group in grouped]
+
+     expected = pd.concat(pieces)
+     tm.assert_series_equal(result, expected)
+
+
+ def test_no_nonsense_name(float_frame):
+     # GH #995
+     s = float_frame["C"].copy()
+     s.name = None
+
+     result = s.groupby(float_frame["A"]).agg(np.sum)
+     assert result.name is None
+
+
+ def test_multifunc_sum_bug():
+     # GH #1065
+     x = DataFrame(np.arange(9).reshape(3, 3))
+     x["test"] = 0
+     x["fl"] = [1.3, 1.5, 1.6]
+
+     grouped = x.groupby("test")
+     result = grouped.agg({"fl": "sum", 2: "size"})
+     assert result["fl"].dtype == np.float64
+
+
+ def test_handle_dict_return_value(df):
+     def f(group):
+         return {"max": group.max(), "min": group.min()}
+
+     def g(group):
+         return Series({"max": group.max(), "min": group.min()})
+
+     result = df.groupby("A")["C"].apply(f)
+     expected = df.groupby("A")["C"].apply(g)
+
+     assert isinstance(result, Series)
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
+ def test_set_group_name(df, grouper):
+     def f(group):
+         assert group.name is not None
+         return group
+
+     def freduce(group):
+         assert group.name is not None
+         return group.sum()
+
+     def freducex(x):
+         return freduce(x)
+
+     grouped = df.groupby(grouper, group_keys=False)
+
+     # make sure all these work
+     grouped.apply(f)
+     grouped.aggregate(freduce)
+     grouped.aggregate({"C": freduce, "D": freduce})
+     grouped.transform(f)
+
+     grouped["C"].apply(f)
+     grouped["C"].aggregate(freduce)
+     grouped["C"].aggregate([freduce, freducex])
+     grouped["C"].transform(f)
+
+
+ def test_group_name_available_in_inference_pass():
+     # gh-15062
+     df = DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
+
+     names = []
+
+     def f(group):
+         names.append(group.name)
+         return group.copy()
+
+     df.groupby("a", sort=False, group_keys=False).apply(f)
+
+     expected_names = [0, 1, 2]
+     assert names == expected_names
+
+
+ def test_no_dummy_key_names(df):
+     # see gh-1291
+     result = df.groupby(df["A"].values).sum()
+     assert result.index.name is None
+
+     result = df.groupby([df["A"].values, df["B"].values]).sum()
+     assert result.index.names == (None, None)
+
+
+ def test_groupby_sort_multiindex_series():
+     # series multiindex groupby sort argument was not being passed through
+     # _compress_group_index
+     # GH 9444
+     index = MultiIndex(
+         levels=[[1, 2], [1, 2]],
+         codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
+         names=["a", "b"],
+     )
+     mseries = Series([0, 1, 2, 3, 4, 5], index=index)
+     index = MultiIndex(
+         levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
+     )
+     mseries_result = Series([0, 2, 4], index=index)
+
+     result = mseries.groupby(level=["a", "b"], sort=False).first()
+     tm.assert_series_equal(result, mseries_result)
+     result = mseries.groupby(level=["a", "b"], sort=True).first()
+     tm.assert_series_equal(result, mseries_result.sort_index())
+
+
+ def test_groupby_reindex_inside_function():
+     periods = 1000
+     ind = date_range(start="2012/1/1", freq="5min", periods=periods)
+     df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
+
+     def agg_before(func, fix=False):
+         """
+         Run an aggregate func on the subset of data.
+         """
+
+         def _func(data):
+             d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
+             if fix:
+                 data[data.index[0]]
+             if len(d) == 0:
+                 return None
+             return func(d)
+
+         return _func
+
+     grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
+     closure_bad = grouped.agg({"high": agg_before(np.max)})
+     closure_good = grouped.agg({"high": agg_before(np.max, True)})
+
+     tm.assert_frame_equal(closure_bad, closure_good)
+
+
+ def test_groupby_multiindex_missing_pair():
+     # GH9049
+     df = DataFrame(
+         {
+             "group1": ["a", "a", "a", "b"],
+             "group2": ["c", "c", "d", "c"],
+             "value": [1, 1, 1, 5],
+         }
+     )
+     df = df.set_index(["group1", "group2"])
+     df_grouped = df.groupby(level=["group1", "group2"], sort=True)
+
+     res = df_grouped.agg("sum")
+     idx = MultiIndex.from_tuples(
+         [("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
+     )
+     exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
+
+     tm.assert_frame_equal(res, exp)
+
+
+ def test_groupby_multiindex_not_lexsorted():
+     # GH 11640
+
+     # define the lexsorted version
+     lexsorted_mi = MultiIndex.from_tuples(
+         [("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
+     )
+     lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
+     assert lexsorted_df.columns._is_lexsorted()
+
+     # define the non-lexsorted version
+     not_lexsorted_df = DataFrame(
+         columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
+     )
+     not_lexsorted_df = not_lexsorted_df.pivot_table(
+         index="a", columns=["b", "c"], values="d"
+     )
+     not_lexsorted_df = not_lexsorted_df.reset_index()
+     assert not not_lexsorted_df.columns._is_lexsorted()
+
+     # compare the results
+     tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
+
+     expected = lexsorted_df.groupby("a").mean()
+     with tm.assert_produces_warning(PerformanceWarning):
+         result = not_lexsorted_df.groupby("a").mean()
+     tm.assert_frame_equal(expected, result)
+
+     # a transforming function should work regardless of sort
+     # GH 14776
+     df = DataFrame(
+         {"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
+     ).set_index(["x", "y"])
+     assert not df.index._is_lexsorted()
+
+     for level in [0, 1, [0, 1]]:
+         for sort in [False, True]:
+             result = df.groupby(level=level, sort=sort, group_keys=False).apply(
+                 DataFrame.drop_duplicates
+             )
+             expected = df
+             tm.assert_frame_equal(expected, result)
+
+             result = (
+                 df.sort_index()
+                 .groupby(level=level, sort=sort, group_keys=False)
+                 .apply(DataFrame.drop_duplicates)
+             )
+             expected = df.sort_index()
+             tm.assert_frame_equal(expected, result)
+
+
+ def test_index_label_overlaps_location():
+     # checking we don't have any label/location confusion in the
+     # wake of GH5375
+     df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
+     g = df.groupby(list("ababb"))
+     actual = g.filter(lambda x: len(x) > 2)
+     expected = df.iloc[[1, 3, 4]]
+     tm.assert_frame_equal(actual, expected)
+
+     ser = df[0]
+     g = ser.groupby(list("ababb"))
+     actual = g.filter(lambda x: len(x) > 2)
+     expected = ser.take([1, 3, 4])
+     tm.assert_series_equal(actual, expected)
+
+     # and again, with a generic Index of floats
+     df.index = df.index.astype(float)
+     g = df.groupby(list("ababb"))
+     actual = g.filter(lambda x: len(x) > 2)
+     expected = df.iloc[[1, 3, 4]]
+     tm.assert_frame_equal(actual, expected)
+
+     ser = df[0]
+     g = ser.groupby(list("ababb"))
+     actual = g.filter(lambda x: len(x) > 2)
+     expected = ser.take([1, 3, 4])
+     tm.assert_series_equal(actual, expected)
+
+
+ def test_transform_doesnt_clobber_ints():
+     # GH 7972
+     n = 6
+     x = np.arange(n)
+     df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x})
+     df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x})
+
+     gb = df.groupby("a")
+     result = gb.transform("mean")
+
+     gb2 = df2.groupby("a")
+     expected = gb2.transform("mean")
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "sort_column",
+     ["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
+ )
+ @pytest.mark.parametrize(
+     "group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
+ )
+ def test_groupby_preserves_sort(sort_column, group_column):
+     # Test to ensure that groupby always preserves sort order of original
+     # object. Issue #8588 and #9651
+
+     df = DataFrame(
+         {
+             "int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
+             "string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
+             "ints": [8, 7, 4, 5, 2, 9, 1, 1],
+             "floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
+             "strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
+         }
+     )
+
+     # Try sorting on different types and with different group types
+
+     df = df.sort_values(by=sort_column)
+     g = df.groupby(group_column)
+
+     def test_sort(x):
+         tm.assert_frame_equal(x, x.sort_values(by=sort_column))
+
+     g.apply(test_sort)
+
+
+ def test_pivot_table_values_key_error():
+     # This test is designed to replicate the error in issue #14938
+     df = DataFrame(
+         {
+             "eventDate": date_range(datetime.today(), periods=20, freq="M").tolist(),
+             "thename": range(0, 20),
+         }
+     )
+
+     df["year"] = df.set_index("eventDate").index.year
+     df["month"] = df.set_index("eventDate").index.month
+
+     with pytest.raises(KeyError, match="'badname'"):
+         df.reset_index().pivot_table(
+             index="year", columns="month", values="badname", aggfunc="count"
+         )
+
+
+ @pytest.mark.parametrize("columns", ["C", ["C"]])
1842
+ @pytest.mark.parametrize("keys", [["A"], ["A", "B"]])
1843
+ @pytest.mark.parametrize(
1844
+ "values",
1845
+ [
1846
+ [True],
1847
+ [0],
1848
+ [0.0],
1849
+ ["a"],
1850
+ Categorical([0]),
1851
+ [to_datetime(0)],
1852
+ date_range(0, 1, 1, tz="US/Eastern"),
1853
+ pd.period_range("2016-01-01", periods=3, freq="D"),
1854
+ pd.array([0], dtype="Int64"),
1855
+ pd.array([0], dtype="Float64"),
1856
+ pd.array([False], dtype="boolean"),
1857
+ ],
1858
+ ids=[
1859
+ "bool",
1860
+ "int",
1861
+ "float",
1862
+ "str",
1863
+ "cat",
1864
+ "dt64",
1865
+ "dt64tz",
1866
+ "period",
1867
+ "Int64",
1868
+ "Float64",
1869
+ "boolean",
1870
+ ],
1871
+ )
1872
+ @pytest.mark.parametrize("method", ["attr", "agg", "apply"])
1873
+ @pytest.mark.parametrize(
1874
+ "op", ["idxmax", "idxmin", "min", "max", "sum", "prod", "skew"]
1875
+ )
1876
+ def test_empty_groupby(
1877
+ columns, keys, values, method, op, request, using_array_manager, dropna
1878
+ ):
1879
+ # GH8093 & GH26411
1880
+ override_dtype = None
1881
+
1882
+ if (
1883
+ isinstance(values, Categorical)
1884
+ and len(keys) == 1
1885
+ and op in ["idxmax", "idxmin"]
1886
+ ):
1887
+ mark = pytest.mark.xfail(
1888
+ raises=ValueError, match="attempt to get arg(min|max) of an empty sequence"
1889
+ )
1890
+ request.node.add_marker(mark)
1891
+
1892
+ if isinstance(values, BooleanArray) and op in ["sum", "prod"]:
1893
+ # We expect to get Int64 back for these
1894
+ override_dtype = "Int64"
1895
+
1896
+ if isinstance(values[0], bool) and op in ("prod", "sum"):
1897
+ # sum/product of bools is an integer
1898
+ override_dtype = "int64"
1899
+
1900
+ df = DataFrame({"A": values, "B": values, "C": values}, columns=list("ABC"))
1901
+
1902
+ if hasattr(values, "dtype"):
1903
+ # check that we did the construction right
1904
+ assert (df.dtypes == values.dtype).all()
1905
+
1906
+ df = df.iloc[:0]
1907
+
1908
+ gb = df.groupby(keys, group_keys=False, dropna=dropna)[columns]
1909
+
1910
+ def get_result(**kwargs):
1911
+ if method == "attr":
1912
+ return getattr(gb, op)(**kwargs)
1913
+ else:
1914
+ return getattr(gb, method)(op, **kwargs)
1915
+
1916
+     def get_categorical_invalid_expected():
+         # Categorical is special without 'observed=True', we get an NaN entry
+         # corresponding to the unobserved group. If we passed observed=True
+         # to groupby, expected would just be 'df.set_index(keys)[columns]'
+         # as below
+         lev = Categorical([0], dtype=values.dtype)
+         if len(keys) != 1:
+             idx = MultiIndex.from_product([lev, lev], names=keys)
+         else:
+             # all columns are dropped, but we end up with one row
+             # Categorical is special without 'observed=True'
+             idx = Index(lev, name=keys[0])
+
+         expected = DataFrame([], columns=[], index=idx)
+         return expected
+
+     is_per = isinstance(df.dtypes[0], pd.PeriodDtype)
+     is_dt64 = df.dtypes[0].kind == "M"
+     is_cat = isinstance(values, Categorical)
+
+     if isinstance(values, Categorical) and not values.ordered and op in ["min", "max"]:
+         msg = f"Cannot perform {op} with non-ordered Categorical"
+         with pytest.raises(TypeError, match=msg):
+             get_result()
+
+         if isinstance(columns, list):
+             # i.e. DataframeGroupBy, not SeriesGroupBy
+             result = get_result(numeric_only=True)
+             expected = get_categorical_invalid_expected()
+             tm.assert_equal(result, expected)
+         return
+
+     if op in ["prod", "sum", "skew"]:
+         # ops that require more than just ordered-ness
+         if is_dt64 or is_cat or is_per:
+             # GH#41291
+             # datetime64 -> prod and sum are invalid
+             if op == "skew":
+                 msg = "does not support reduction 'skew'"
+             elif is_dt64:
+                 msg = "datetime64 type does not support"
+             elif is_per:
+                 msg = "Period type does not support"
+             else:
+                 msg = "category type does not support"
+             with pytest.raises(TypeError, match=msg):
+                 get_result()
+
+             if not isinstance(columns, list):
+                 # i.e. SeriesGroupBy
+                 return
+             elif op == "skew":
+                 # TODO: test the numeric_only=True case
+                 return
+             else:
+                 # i.e. op in ["prod", "sum"]:
+                 # i.e. DataFrameGroupBy
+                 # ops that require more than just ordered-ness
+                 # GH#41291
+                 result = get_result(numeric_only=True)
+
+                 # with numeric_only=True, these are dropped, and we get
+                 # an empty DataFrame back
+                 expected = df.set_index(keys)[[]]
+                 if is_cat:
+                     expected = get_categorical_invalid_expected()
+                 tm.assert_equal(result, expected)
+                 return
+
+     result = get_result()
+     expected = df.set_index(keys)[columns]
+     if override_dtype is not None:
+         expected = expected.astype(override_dtype)
+     if len(keys) == 1:
+         expected.index.name = keys[0]
+     tm.assert_equal(result, expected)
+
+
+ def test_empty_groupby_apply_nonunique_columns():
+     # GH#44417
+     df = DataFrame(np.random.randn(0, 4))
+     df[3] = df[3].astype(np.int64)
+     df.columns = [0, 1, 2, 0]
+     gb = df.groupby(df[1], group_keys=False)
+     res = gb.apply(lambda x: x)
+     assert (res.dtypes == df.dtypes).all()
+
+
+ def test_tuple_as_grouping():
+     # https://github.com/pandas-dev/pandas/issues/18314
+     df = DataFrame(
+         {
+             ("a", "b"): [1, 1, 1, 1],
+             "a": [2, 2, 2, 2],
+             "b": [2, 2, 2, 2],
+             "c": [1, 1, 1, 1],
+         }
+     )
+
+     with pytest.raises(KeyError, match=r"('a', 'b')"):
+         df[["a", "b", "c"]].groupby(("a", "b"))
+
+     result = df.groupby(("a", "b"))["c"].sum()
+     expected = Series([4], name="c", index=Index([1], name=("a", "b")))
+     tm.assert_series_equal(result, expected)
+
+
+ def test_tuple_correct_keyerror():
+     # https://github.com/pandas-dev/pandas/issues/18798
+     df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]]))
+     with pytest.raises(KeyError, match=r"^\(7, 8\)$"):
+         df.groupby((7, 8)).mean()
+
+
+ def test_groupby_agg_ohlc_non_first():
+     # GH 21716
+     df = DataFrame(
+         [[1], [1]],
+         columns=Index(["foo"], name="mycols"),
+         index=date_range("2018-01-01", periods=2, freq="D", name="dti"),
+     )
+
+     expected = DataFrame(
+         [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
+         columns=MultiIndex.from_tuples(
+             (
+                 ("foo", "sum", "foo"),
+                 ("foo", "ohlc", "open"),
+                 ("foo", "ohlc", "high"),
+                 ("foo", "ohlc", "low"),
+                 ("foo", "ohlc", "close"),
+             ),
+             names=["mycols", None, None],
+         ),
+         index=date_range("2018-01-01", periods=2, freq="D", name="dti"),
+     )
+
+     result = df.groupby(Grouper(freq="D")).agg(["sum", "ohlc"])
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_multiindex_nat():
+     # GH 9236
+     values = [
+         (pd.NaT, "a"),
+         (datetime(2012, 1, 2), "a"),
+         (datetime(2012, 1, 2), "b"),
+         (datetime(2012, 1, 3), "a"),
+     ]
+     mi = MultiIndex.from_tuples(values, names=["date", None])
+     ser = Series([3, 2, 2.5, 4], index=mi)
+
+     result = ser.groupby(level=1).mean()
+     expected = Series([3.0, 2.5], index=["a", "b"])
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_empty_list_raises():
+     # GH 5289
+     values = zip(range(10), range(10))
+     df = DataFrame(values, columns=["apple", "b"])
+     msg = "Grouper and axis must be same length"
+     with pytest.raises(ValueError, match=msg):
+         df.groupby([[]])
+
+
+ def test_groupby_multiindex_series_keys_len_equal_group_axis():
+     # GH 25704
+     index_array = [["x", "x"], ["a", "b"], ["k", "k"]]
+     index_names = ["first", "second", "third"]
+     ri = MultiIndex.from_arrays(index_array, names=index_names)
+     s = Series(data=[1, 2], index=ri)
+     result = s.groupby(["first", "third"]).sum()
+
+     index_array = [["x"], ["k"]]
+     index_names = ["first", "third"]
+     ei = MultiIndex.from_arrays(index_array, names=index_names)
+     expected = Series([3], index=ei)
+
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_groups_in_BaseGrouper():
+     # GH 26326
+     # Test if DataFrame grouped with a pandas.Grouper has correct groups
+     mi = MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"])
+     df = DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi)
+     result = df.groupby([Grouper(level="alpha"), "beta"])
+     expected = df.groupby(["alpha", "beta"])
+     assert result.groups == expected.groups
+
+     result = df.groupby(["beta", Grouper(level="alpha")])
+     expected = df.groupby(["beta", "alpha"])
+     assert result.groups == expected.groups
+
+
+ @pytest.mark.parametrize("group_name", ["x", ["x"]])
+ def test_groupby_axis_1(group_name):
+     # GH 27614
+     df = DataFrame(
+         np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]
+     )
+     df.index.name = "y"
+     df.columns.name = "x"
+
+     results = df.groupby(group_name, axis=1).sum()
+     expected = df.T.groupby(group_name).sum().T
+     tm.assert_frame_equal(results, expected)
+
+     # test on MI column
+     iterables = [["bar", "baz", "foo"], ["one", "two"]]
+     mi = MultiIndex.from_product(iterables=iterables, names=["x", "x1"])
+     df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)
+     results = df.groupby(group_name, axis=1).sum()
+     expected = df.T.groupby(group_name).sum().T
+     tm.assert_frame_equal(results, expected)
+
+
+ @pytest.mark.parametrize(
+     "op, expected",
+     [
+         (
+             "shift",
+             {
+                 "time": [
+                     None,
+                     None,
+                     Timestamp("2019-01-01 12:00:00"),
+                     Timestamp("2019-01-01 12:30:00"),
+                     None,
+                     None,
+                 ]
+             },
+         ),
+         (
+             "bfill",
+             {
+                 "time": [
+                     Timestamp("2019-01-01 12:00:00"),
+                     Timestamp("2019-01-01 12:30:00"),
+                     Timestamp("2019-01-01 14:00:00"),
+                     Timestamp("2019-01-01 14:30:00"),
+                     Timestamp("2019-01-01 14:00:00"),
+                     Timestamp("2019-01-01 14:30:00"),
+                 ]
+             },
+         ),
+         (
+             "ffill",
+             {
+                 "time": [
+                     Timestamp("2019-01-01 12:00:00"),
+                     Timestamp("2019-01-01 12:30:00"),
+                     Timestamp("2019-01-01 12:00:00"),
+                     Timestamp("2019-01-01 12:30:00"),
+                     Timestamp("2019-01-01 14:00:00"),
+                     Timestamp("2019-01-01 14:30:00"),
+                 ]
+             },
+         ),
+     ],
+ )
+ def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
+     # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill
+     tz = tz_naive_fixture
+     data = {
+         "id": ["A", "B", "A", "B", "A", "B"],
+         "time": [
+             Timestamp("2019-01-01 12:00:00"),
+             Timestamp("2019-01-01 12:30:00"),
+             None,
+             None,
+             Timestamp("2019-01-01 14:00:00"),
+             Timestamp("2019-01-01 14:30:00"),
+         ],
+     }
+     df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))
+
+     grouped = df.groupby("id")
+     result = getattr(grouped, op)()
+     expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_only_none_group():
+     # see GH21624
+     # this was crashing with "ValueError: Length of passed values is 1, index implies 0"
+     df = DataFrame({"g": [None], "x": 1})
+     actual = df.groupby("g")["x"].transform("sum")
+     expected = Series([np.nan], name="x")
+
+     tm.assert_series_equal(actual, expected)
+
+
+ def test_groupby_duplicate_index():
+     # GH#29189 the groupby call here used to raise
+     ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
+     gb = ser.groupby(level=0)
+
+     result = gb.mean()
+     expected = Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])
+     tm.assert_series_equal(result, expected)
+
+
+ def test_group_on_empty_multiindex(transformation_func, request):
+     # GH 47787
+     # With one row, those are transforms so the schema should be the same
+     df = DataFrame(
+         data=[[1, Timestamp("today"), 3, 4]],
+         columns=["col_1", "col_2", "col_3", "col_4"],
+     )
+     df["col_3"] = df["col_3"].astype(int)
+     df["col_4"] = df["col_4"].astype(int)
+     df = df.set_index(["col_1", "col_2"])
+     if transformation_func == "fillna":
+         args = ("ffill",)
+     else:
+         args = ()
+     result = df.iloc[:0].groupby(["col_1"]).transform(transformation_func, *args)
+     expected = df.groupby(["col_1"]).transform(transformation_func, *args).iloc[:0]
+     if transformation_func in ("diff", "shift"):
+         expected = expected.astype(int)
+     tm.assert_equal(result, expected)
+
+     result = (
+         df["col_3"].iloc[:0].groupby(["col_1"]).transform(transformation_func, *args)
+     )
+     expected = (
+         df["col_3"].groupby(["col_1"]).transform(transformation_func, *args).iloc[:0]
+     )
+     if transformation_func in ("diff", "shift"):
+         expected = expected.astype(int)
+     tm.assert_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "idx",
+     [
+         Index(["a", "a"], name="foo"),
+         MultiIndex.from_tuples((("a", "a"), ("a", "a")), names=["foo", "bar"]),
+     ],
+ )
+ def test_dup_labels_output_shape(groupby_func, idx):
+     if groupby_func in {"size", "ngroup", "cumcount"}:
+         pytest.skip(f"Not applicable for {groupby_func}")
+
+     df = DataFrame([[1, 1]], columns=idx)
+     grp_by = df.groupby([0])
+
+     args = get_groupby_method_args(groupby_func, df)
+     result = getattr(grp_by, groupby_func)(*args)
+
+     assert result.shape == (1, 2)
+     tm.assert_index_equal(result.columns, idx)
+
+
+ def test_groupby_crash_on_nunique(axis):
+     # Fix following 30253
+     dti = date_range("2016-01-01", periods=2, name="foo")
+     df = DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]})
+     df.columns.names = ("bar", "baz")
+     df.index = dti
+
+     axis_number = df._get_axis_number(axis)
+     if not axis_number:
+         df = df.T
+
+     gb = df.groupby(axis=axis_number, level=0)
+     result = gb.nunique()
+
+     expected = DataFrame({"A": [1, 2], "D": [1, 1]}, index=dti)
+     expected.columns.name = "bar"
+     if not axis_number:
+         expected = expected.T
+
+     tm.assert_frame_equal(result, expected)
+
+     if axis_number == 0:
+         # same thing, but empty columns
+         gb2 = df[[]].groupby(axis=axis_number, level=0)
+         exp = expected[[]]
+     else:
+         # same thing, but empty rows
+         gb2 = df.loc[[]].groupby(axis=axis_number, level=0)
+         # default for empty when we can't infer a dtype is float64
+         exp = expected.loc[[]].astype(np.float64)
+
+     res = gb2.nunique()
+     tm.assert_frame_equal(res, exp)
+
+
+ def test_groupby_list_level():
+     # GH 9790
+     expected = DataFrame(np.arange(0, 9).reshape(3, 3), dtype=float)
+     result = expected.groupby(level=[0]).mean()
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "max_seq_items, expected",
+     [
+         (5, "{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}"),
+         (4, "{0: [0], 1: [1], 2: [2], 3: [3], ...}"),
+         (1, "{0: [0], ...}"),
+     ],
+ )
+ def test_groups_repr_truncates(max_seq_items, expected):
+     # GH 1135
+     df = DataFrame(np.random.randn(5, 1))
+     df["a"] = df.index
+
+     with pd.option_context("display.max_seq_items", max_seq_items):
+         result = df.groupby("a").groups.__repr__()
+         assert result == expected
+
+         result = df.groupby(np.array(df.a)).groups.__repr__()
+         assert result == expected
+
+
+ def test_group_on_two_row_multiindex_returns_one_tuple_key():
+     # GH 18451
+     df = DataFrame([{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}])
+     df = df.set_index(["a", "b"])
+
+     grp = df.groupby(["a", "b"])
+     result = grp.indices
+     expected = {(1, 2): np.array([0, 1], dtype=np.int64)}
+
+     assert len(result) == 1
+     key = (1, 2)
+     assert (result[key] == expected[key]).all()
+
+
+ @pytest.mark.parametrize(
+     "klass, attr, value",
+     [
+         (DataFrame, "level", "a"),
+         (DataFrame, "as_index", False),
+         (DataFrame, "sort", False),
+         (DataFrame, "group_keys", False),
+         (DataFrame, "observed", True),
+         (DataFrame, "dropna", False),
+         (Series, "level", "a"),
+         (Series, "as_index", False),
+         (Series, "sort", False),
+         (Series, "group_keys", False),
+         (Series, "observed", True),
+         (Series, "dropna", False),
+     ],
+ )
+ def test_subsetting_columns_keeps_attrs(klass, attr, value):
+     # GH 9959 - When subsetting columns, don't drop attributes
+     df = DataFrame({"a": [1], "b": [2], "c": [3]})
+     if attr != "axis":
+         df = df.set_index("a")
+
+     expected = df.groupby("a", **{attr: value})
+     result = expected[["b"]] if klass is DataFrame else expected["b"]
+     assert getattr(result, attr) == getattr(expected, attr)
+
+
+ def test_subsetting_columns_axis_1():
+     # GH 37725
+     g = DataFrame({"A": [1], "B": [2], "C": [3]}).groupby([0, 0, 1], axis=1)
+     match = "Cannot subset columns when using axis=1"
+     with pytest.raises(ValueError, match=match):
+         g[["A", "B"]].sum()
+
+
+ @pytest.mark.parametrize("func", ["sum", "any", "shift"])
+ def test_groupby_column_index_name_lost(func):
+     # GH: 29764 groupby loses index sometimes
+     expected = Index(["a"], name="idx")
+     df = DataFrame([[1]], columns=expected)
+     df_grouped = df.groupby([1])
+     result = getattr(df_grouped, func)().columns
+     tm.assert_index_equal(result, expected)
+
+
+ def test_groupby_duplicate_columns():
+     # GH: 31735
+     df = DataFrame(
+         {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]}
+     ).astype(object)
+     df.columns = ["A", "B", "B"]
+     result = df.groupby([0, 0, 0, 0]).min()
+     expected = DataFrame(
+         [["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"], dtype=object
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_series_with_tuple_name():
+     # GH 37755
+     ser = Series([1, 2, 3, 4], index=[1, 1, 2, 2], name=("a", "a"))
+     ser.index.name = ("b", "b")
+     result = ser.groupby(level=0).last()
+     expected = Series([2, 4], index=[1, 2], name=("a", "a"))
+     expected.index.name = ("b", "b")
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
+ @pytest.mark.parametrize(
+     "func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])]
+ )
+ def test_groupby_numerical_stability_sum_mean(func, values):
+     # GH#38778
+     data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
+     df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
+     result = getattr(df.groupby("group"), func)()
+     expected = DataFrame({"a": values, "b": values}, index=Index([1, 2], name="group"))
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
+ def test_groupby_numerical_stability_cumsum():
+     # GH#38934
+     data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
+     df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
+     result = df.groupby("group").cumsum()
+     exp_data = (
+         [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]
+     )
+     expected = DataFrame({"a": exp_data, "b": exp_data})
+     tm.assert_frame_equal(result, expected, check_exact=True)
+
+
+ def test_groupby_cumsum_skipna_false():
+     # GH#46216 don't propagate np.nan above the diagonal
+     arr = np.random.randn(5, 5)
+     df = DataFrame(arr)
+     for i in range(5):
+         df.iloc[i, i] = np.nan
+
+     df["A"] = 1
+     gb = df.groupby("A")
+
+     res = gb.cumsum(skipna=False)
+
+     expected = df[[0, 1, 2, 3, 4]].cumsum(skipna=False)
+     tm.assert_frame_equal(res, expected)
+
+
+ def test_groupby_cumsum_timedelta64():
+     # GH#46216 don't ignore is_datetimelike in libgroupby.group_cumsum
+     dti = date_range("2016-01-01", periods=5)
+     ser = Series(dti) - dti[0]
+     ser[2] = pd.NaT
+
+     df = DataFrame({"A": 1, "B": ser})
+     gb = df.groupby("A")
+
+     res = gb.cumsum(numeric_only=False, skipna=True)
+     exp = DataFrame({"B": [ser[0], ser[1], pd.NaT, ser[4], ser[4] * 2]})
+     tm.assert_frame_equal(res, exp)
+
+     res = gb.cumsum(numeric_only=False, skipna=False)
+     exp = DataFrame({"B": [ser[0], ser[1], pd.NaT, pd.NaT, pd.NaT]})
+     tm.assert_frame_equal(res, exp)
+
+
+ def test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex):
+     dups = rand_series_with_duplicate_datetimeindex
+     result = dups.groupby(level=0).mean()
+     expected = dups.groupby(dups.index).mean()
+     tm.assert_series_equal(result, expected)
+
+
+ def test_groupby_all_nan_groups_drop():
+     # GH 15036
+     s = Series([1, 2, 3], [np.nan, np.nan, np.nan])
+     result = s.groupby(s.index).sum()
+     expected = Series([], index=Index([], dtype=np.float64), dtype=np.int64)
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize("numeric_only", [True, False])
+ def test_groupby_empty_multi_column(as_index, numeric_only):
+     # GH 15106 & GH 41998
+     df = DataFrame(data=[], columns=["A", "B", "C"])
+     gb = df.groupby(["A", "B"], as_index=as_index)
+     result = gb.sum(numeric_only=numeric_only)
+     if as_index:
+         index = MultiIndex([[], []], [[], []], names=["A", "B"])
+         columns = ["C"] if not numeric_only else []
+     else:
+         index = RangeIndex(0)
+         columns = ["A", "B", "C"] if not numeric_only else ["A", "B"]
+     expected = DataFrame([], columns=columns, index=index)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_aggregation_non_numeric_dtype():
+     # GH #43108
+     df = DataFrame(
+         [["M", [1]], ["M", [1]], ["W", [10]], ["W", [20]]], columns=["MW", "v"]
+     )
+
+     expected = DataFrame(
+         {
+             "v": [[1, 1], [10, 20]],
+         },
+         index=Index(["M", "W"], dtype="object", name="MW"),
+     )
+
+     gb = df.groupby(by=["MW"])
+     result = gb.sum()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_aggregation_multi_non_numeric_dtype():
+     # GH #42395
+     df = DataFrame(
+         {
+             "x": [1, 0, 1, 1, 0],
+             "y": [Timedelta(i, "days") for i in range(1, 6)],
+             "z": [Timedelta(i * 10, "days") for i in range(1, 6)],
+         }
+     )
+
+     expected = DataFrame(
+         {
+             "y": [Timedelta(i, "days") for i in range(7, 9)],
+             "z": [Timedelta(i * 10, "days") for i in range(7, 9)],
+         },
+         index=Index([0, 1], dtype="int64", name="x"),
+     )
+
+     gb = df.groupby(by=["x"])
+     result = gb.sum()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_aggregation_numeric_with_non_numeric_dtype():
+     # GH #43108
+     df = DataFrame(
+         {
+             "x": [1, 0, 1, 1, 0],
+             "y": [Timedelta(i, "days") for i in range(1, 6)],
+             "z": list(range(1, 6)),
+         }
+     )
+
+     expected = DataFrame(
+         {"y": [Timedelta(7, "days"), Timedelta(8, "days")], "z": [7, 8]},
+         index=Index([0, 1], dtype="int64", name="x"),
+     )
+
+     gb = df.groupby(by=["x"])
+     result = gb.sum()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_filtered_df_std():
+     # GH 16174
+     dicts = [
+         {"filter_col": False, "groupby_col": True, "bool_col": True, "float_col": 10.5},
+         {"filter_col": True, "groupby_col": True, "bool_col": True, "float_col": 20.5},
+         {"filter_col": True, "groupby_col": True, "bool_col": True, "float_col": 30.5},
+     ]
+     df = DataFrame(dicts)
+
+     df_filter = df[df["filter_col"] == True]  # noqa:E712
+     dfgb = df_filter.groupby("groupby_col")
+     result = dfgb.std()
+     expected = DataFrame(
+         [[0.0, 0.0, 7.071068]],
+         columns=["filter_col", "bool_col", "float_col"],
+         index=Index([True], name="groupby_col"),
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_datetime_categorical_multikey_groupby_indices():
+     # GH 26859
+     df = DataFrame(
+         {
+             "a": Series(list("abc")),
+             "b": Series(
+                 to_datetime(["2018-01-01", "2018-02-01", "2018-03-01"]),
+                 dtype="category",
+             ),
+             "c": Categorical.from_codes([-1, 0, 1], categories=[0, 1]),
+         }
+     )
+     result = df.groupby(["a", "b"]).indices
+     expected = {
+         ("a", Timestamp("2018-01-01 00:00:00")): np.array([0]),
+         ("b", Timestamp("2018-02-01 00:00:00")): np.array([1]),
+         ("c", Timestamp("2018-03-01 00:00:00")): np.array([2]),
+     }
+     assert result == expected
+
+
+ def test_rolling_wrong_param_min_period():
+     # GH34037
+     name_l = ["Alice"] * 5 + ["Bob"] * 5
+     val_l = [np.nan, np.nan, 1, 2, 3] + [np.nan, 1, 2, 3, 4]
+     test_df = DataFrame([name_l, val_l]).T
+     test_df.columns = ["name", "val"]
+
+     result_error_msg = r"__init__\(\) got an unexpected keyword argument 'min_period'"
+     with pytest.raises(TypeError, match=result_error_msg):
+         test_df.groupby("name")["val"].rolling(window=2, min_period=1).sum()
+
+
+ def test_by_column_values_with_same_starting_value():
+     # GH29635
+     df = DataFrame(
+         {
+             "Name": ["Thomas", "Thomas", "Thomas John"],
+             "Credit": [1200, 1300, 900],
+             "Mood": ["sad", "happy", "happy"],
+         }
+     )
+     aggregate_details = {"Mood": Series.mode, "Credit": "sum"}
+
+     result = df.groupby(["Name"]).agg(aggregate_details)
+     expected_result = DataFrame(
+         {
+             "Mood": [["happy", "sad"], "happy"],
+             "Credit": [2500, 900],
+             "Name": ["Thomas", "Thomas John"],
+         }
+     ).set_index("Name")
+
+     tm.assert_frame_equal(result, expected_result)
+
+
+ def test_groupby_none_in_first_mi_level():
2648
+ # GH#47348
2649
+ arr = [[None, 1, 0, 1], [2, 3, 2, 3]]
2650
+ ser = Series(1, index=MultiIndex.from_arrays(arr, names=["a", "b"]))
2651
+ result = ser.groupby(level=[0, 1]).sum()
2652
+ expected = Series(
2653
+ [1, 2], MultiIndex.from_tuples([(0.0, 2), (1.0, 3)], names=["a", "b"])
2654
+ )
2655
+ tm.assert_series_equal(result, expected)
2656
+
2657
+
2658
+ def test_groupby_none_column_name():
2659
+ # GH#47348
2660
+ df = DataFrame({None: [1, 1, 2, 2], "b": [1, 1, 2, 3], "c": [4, 5, 6, 7]})
2661
+ result = df.groupby(by=[None]).sum()
2662
+ expected = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None))
2663
+ tm.assert_frame_equal(result, expected)
2664
+
2665
+
2666
+ def test_single_element_list_grouping():
2667
+ # GH 42795
2668
+ df = DataFrame({"a": [1, 2], "b": [np.nan, 5], "c": [np.nan, 2]}, index=["x", "y"])
2669
+ result = [key for key, _ in df.groupby(["a"])]
2670
+ expected = [(1,), (2,)]
2671
+ assert result == expected
2672
+
2673
+
2674
+ @pytest.mark.parametrize("func", ["sum", "cumsum", "cumprod", "prod"])
2675
+ def test_groupby_avoid_casting_to_float(func):
2676
+ # GH#37493
2677
+ val = 922337203685477580
2678
+ df = DataFrame({"a": 1, "b": [val]})
2679
+ result = getattr(df.groupby("a"), func)() - val
2680
+ expected = DataFrame({"b": [0]}, index=Index([1], name="a"))
2681
+ if func in ["cumsum", "cumprod"]:
2682
+ expected = expected.reset_index(drop=True)
2683
+ tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("func, val", [("sum", 3), ("prod", 2)])
+ def test_groupby_sum_support_mask(any_numeric_ea_dtype, func, val):
+     # GH#37493
+     df = DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype=any_numeric_ea_dtype)
+     result = getattr(df.groupby("a"), func)()
+     expected = DataFrame(
+         {"b": [val]},
+         index=Index([1], name="a", dtype=any_numeric_ea_dtype),
+         dtype=any_numeric_ea_dtype,
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("val, dtype", [(111, "int"), (222, "uint")])
+ def test_groupby_overflow(val, dtype):
+     # GH#37493
+     df = DataFrame({"a": 1, "b": [val, val]}, dtype=f"{dtype}8")
+     result = df.groupby("a").sum()
+     expected = DataFrame(
+         {"b": [val * 2]},
+         index=Index([1], name="a", dtype=f"{dtype}8"),
+         dtype=f"{dtype}64",
+     )
+     tm.assert_frame_equal(result, expected)
+
+     result = df.groupby("a").cumsum()
+     expected = DataFrame({"b": [val, val * 2]}, dtype=f"{dtype}64")
+     tm.assert_frame_equal(result, expected)
+
+     result = df.groupby("a").prod()
+     expected = DataFrame(
+         {"b": [val * val]},
+         index=Index([1], name="a", dtype=f"{dtype}8"),
+         dtype=f"{dtype}64",
+     )
+     tm.assert_frame_equal(result, expected)
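+
+     # Hedged overflow note (illustrative addition, not upstream code):
+     # without the upcast to f"{dtype}64", the uint8 sum would wrap
+     # modulo 256, e.g. (222 + 222) % 256 == 188 rather than 444.
+     assert (222 + 222) % 256 == 188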
+
+
+ @pytest.mark.parametrize("skipna, val", [(True, 3), (False, pd.NA)])
+ def test_groupby_cumsum_mask(any_numeric_ea_dtype, skipna, val):
+     # GH#37493
+     df = DataFrame({"a": 1, "b": [1, pd.NA, 2]}, dtype=any_numeric_ea_dtype)
+     result = df.groupby("a").cumsum(skipna=skipna)
+     expected = DataFrame(
+         {"b": [1, pd.NA, val]},
+         dtype=any_numeric_ea_dtype,
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "val_in, index, val_out",
+     [
+         (
+             [1.0, 2.0, 3.0, 4.0, 5.0],
+             ["foo", "foo", "bar", "baz", "blah"],
+             [3.0, 4.0, 5.0, 3.0],
+         ),
+         (
+             [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+             ["foo", "foo", "bar", "baz", "blah", "blah"],
+             [3.0, 4.0, 11.0, 3.0],
+         ),
+     ],
+ )
+ def test_groupby_index_name_in_index_content(val_in, index, val_out):
+     # GH 48567
+     series = Series(data=val_in, name="values", index=Index(index, name="blah"))
+     result = series.groupby("blah").sum()
+     expected = Series(
+         data=val_out,
+         name="values",
+         index=Index(["bar", "baz", "blah", "foo"], name="blah"),
+     )
+     tm.assert_series_equal(result, expected)
+
+     result = series.to_frame().groupby("blah").sum()
+     expected = expected.to_frame()
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("n", [1, 10, 32, 100, 1000])
+ def test_sum_of_booleans(n):
+     # GH 50347
+     df = DataFrame({"groupby_col": 1, "bool": [True] * n})
+     df["bool"] = df["bool"].eq(True)
+     result = df.groupby("groupby_col").sum()
+     expected = DataFrame({"bool": [n]}, index=Index([1], name="groupby_col"))
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.filterwarnings(
+     "ignore:invalid value encountered in remainder:RuntimeWarning"
+ )
+ @pytest.mark.parametrize("method", ["head", "tail", "nth", "first", "last"])
+ def test_groupby_method_drop_na(method):
+     # GH 21755
+     df = DataFrame({"A": ["a", np.nan, "b", np.nan, "c"], "B": range(5)})
+
+     if method == "nth":
+         result = getattr(df.groupby("A"), method)(n=0)
+     else:
+         result = getattr(df.groupby("A"), method)()
+
+     if method in ["first", "last"]:
+         expected = DataFrame({"B": [0, 2, 4]}).set_index(
+             Series(["a", "b", "c"], name="A")
+         )
+     else:
+         expected = DataFrame({"A": ["a", "b", "c"], "B": [0, 2, 4]}, index=[0, 2, 4])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_groupby_reduce_period():
+     # GH#51040
+     pi = pd.period_range("2016-01-01", periods=100, freq="D")
+     grps = list(range(10)) * 10
+     ser = pi.to_series()
+     gb = ser.groupby(grps)
+
+     with pytest.raises(TypeError, match="Period type does not support sum operations"):
+         gb.sum()
+     with pytest.raises(
+         TypeError, match="Period type does not support cumsum operations"
+     ):
+         gb.cumsum()
+     with pytest.raises(TypeError, match="Period type does not support prod operations"):
+         gb.prod()
+     with pytest.raises(
+         TypeError, match="Period type does not support cumprod operations"
+     ):
+         gb.cumprod()
+
+     res = gb.max()
+     expected = ser[-10:]
+     expected.index = Index(range(10), dtype=np.int_)
+     tm.assert_series_equal(res, expected)
+
+     res = gb.min()
+     expected = ser[:10]
+     expected.index = Index(range(10), dtype=np.int_)
+     tm.assert_series_equal(res, expected)
+
+
+ def test_obj_with_exclusions_duplicate_columns():
+     # GH#50806
+     df = DataFrame([[0, 1, 2, 3]])
+     df.columns = [0, 1, 2, 0]
+     gb = df.groupby(df[1])
+     result = gb._obj_with_exclusions
+     expected = df.take([0, 2, 3], axis=1)
+     tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_dropna.py ADDED
@@ -0,0 +1,684 @@
+ import numpy as np
+ import pytest
+
+ from pandas.compat.pyarrow import pa_version_under7p0
+
+ from pandas.core.dtypes.missing import na_value_for_dtype
+
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.tests.groupby import get_groupby_method_args
+
+
+ @pytest.mark.parametrize(
+     "dropna, tuples, outputs",
+     [
+         (
+             True,
+             [["A", "B"], ["B", "A"]],
+             {"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]},
+         ),
+         (
+             False,
+             [["A", "B"], ["A", np.nan], ["B", "A"]],
+             {
+                 "c": [13.0, 12.3, 123.23],
+                 "d": [13.0, 233.0, 123.0],
+                 "e": [13.0, 12.0, 1.0],
+             },
+         ),
+     ],
+ )
+ def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
+     dropna, tuples, outputs, nulls_fixture
+ ):
+     # GH 3729: this tests the case where NA values fall in one group
+     df_list = [
+         ["A", "B", 12, 12, 12],
+         ["A", nulls_fixture, 12.3, 233.0, 12],
+         ["B", "A", 123.23, 123, 1],
+         ["A", "B", 1, 1, 1.0],
+     ]
+     df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
+     grouped = df.groupby(["a", "b"], dropna=dropna).sum()
+
+     mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
+
+     # By default, MultiIndex drops NA from levels when created via `from_*`,
+     # so we need to add the NA level back manually afterwards.
+     if not dropna:
+         mi = mi.set_levels(["A", "B", np.nan], level="b")
+     expected = pd.DataFrame(outputs, index=mi)
+
+     tm.assert_frame_equal(grouped, expected)
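+
+
+ # Hedged illustration (not upstream code) of why set_levels is needed above:
+ # MultiIndex.from_* constructors keep NA out of the levels and encode it as
+ # code -1, so the NA level has to be re-added explicitly.
+ def _demo_mi_na_levels_sketch():
+     mi = pd.MultiIndex.from_tuples([("A", np.nan)], names=["a", "b"])
+     assert len(mi.levels[1]) == 0  # NA lives in the codes, not the levels
+     return mi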
+
+
+ @pytest.mark.parametrize(
+     "dropna, tuples, outputs",
+     [
+         (
+             True,
+             [["A", "B"], ["B", "A"]],
+             {"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]},
+         ),
+         (
+             False,
+             [["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]],
+             {
+                 "c": [12.0, 13.3, 123.23, 1.0],
+                 "d": [12.0, 234.0, 123.0, 1.0],
+                 "e": [12.0, 13.0, 1.0, 1.0],
+             },
+         ),
+     ],
+ )
+ def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
+     dropna, tuples, outputs, nulls_fixture, nulls_fixture2
+ ):
+     # GH 3729: this tests NAs that fall in different groups with different
+     # null representations
+     df_list = [
+         ["A", "B", 12, 12, 12],
+         ["A", nulls_fixture, 12.3, 233.0, 12],
+         ["B", "A", 123.23, 123, 1],
+         [nulls_fixture2, "B", 1, 1, 1.0],
+         ["A", nulls_fixture2, 1, 1, 1.0],
+     ]
+     df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
+     grouped = df.groupby(["a", "b"], dropna=dropna).sum()
+
+     mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
+
+     # By default, MultiIndex drops NA from levels when created via `from_*`,
+     # so we need to add the NA level back manually afterwards.
+     if not dropna:
+         mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
+     expected = pd.DataFrame(outputs, index=mi)
+
+     tm.assert_frame_equal(grouped, expected)
+
+
+ @pytest.mark.parametrize(
+     "dropna, idx, outputs",
+     [
+         (True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}),
+         (
+             False,
+             ["A", "B", np.nan],
+             {
+                 "b": [123.23, 13.0, 12.3],
+                 "c": [123.0, 13.0, 233.0],
+                 "d": [1.0, 13.0, 12.0],
+             },
+         ),
+     ],
+ )
+ def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs):
+     # GH 3729
+     df_list = [
+         ["B", 12, 12, 12],
+         [None, 12.3, 233.0, 12],
+         ["A", 123.23, 123, 1],
+         ["B", 1, 1, 1.0],
+     ]
+     df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"])
+     grouped = df.groupby("a", dropna=dropna).sum()
+
+     expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a"))
+
+     tm.assert_frame_equal(grouped, expected)
+
+
+ @pytest.mark.parametrize(
+     "dropna, idx, expected",
+     [
+         (True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])),
+         (
+             False,
+             ["a", "a", "b", np.nan],
+             pd.Series([3, 3, 3], index=["a", "b", np.nan]),
+         ),
+     ],
+ )
+ def test_groupby_dropna_series_level(dropna, idx, expected):
+     ser = pd.Series([1, 2, 3, 3], index=idx)
+
+     result = ser.groupby(level=0, dropna=dropna).sum()
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "dropna, expected",
+     [
+         (True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")),
+         (
+             False,
+             pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"),
+         ),
+     ],
+ )
+ def test_groupby_dropna_series_by(dropna, expected):
+     ser = pd.Series(
+         [390.0, 350.0, 30.0, 20.0],
+         index=["Falcon", "Falcon", "Parrot", "Parrot"],
+         name="Max Speed",
+     )
+
+     result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean()
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize("dropna", (False, True))
+ def test_grouper_dropna_propagation(dropna):
+     # GH 36604
+     df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]})
+     gb = df.groupby("A", dropna=dropna)
+     assert gb.grouper.dropna == dropna
+
+
+ @pytest.mark.parametrize(
+     "index",
+     [
+         pd.RangeIndex(0, 4),
+         list("abcd"),
+         pd.MultiIndex.from_product([(1, 2), ("R", "B")], names=["num", "col"]),
+     ],
+ )
+ def test_groupby_dataframe_slice_then_transform(dropna, index):
+     # GH35014 & GH35612
+     expected_data = {"B": [2, 2, 1, np.nan if dropna else 1]}
+
+     df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}, index=index)
+     gb = df.groupby("A", dropna=dropna)
+
+     result = gb.transform(len)
+     expected = pd.DataFrame(expected_data, index=index)
+     tm.assert_frame_equal(result, expected)
+
+     result = gb[["B"]].transform(len)
+     expected = pd.DataFrame(expected_data, index=index)
+     tm.assert_frame_equal(result, expected)
+
+     result = gb["B"].transform(len)
+     expected = pd.Series(expected_data["B"], index=index, name="B")
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "dropna, tuples, outputs",
+     [
+         (
+             True,
+             [["A", "B"], ["B", "A"]],
+             {"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]},
+         ),
+         (
+             False,
+             [["A", "B"], ["A", np.nan], ["B", "A"]],
+             {
+                 "c": [13.0, 12.3, 123.23],
+                 "d": [12.0, 233.0, 123.0],
+                 "e": [1.0, 12.0, 1.0],
+             },
+         ),
+     ],
+ )
+ def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs):
+     # GH 3729
+     df_list = [
+         ["A", "B", 12, 12, 12],
+         ["A", None, 12.3, 233.0, 12],
+         ["B", "A", 123.23, 123, 1],
+         ["A", "B", 1, 1, 1.0],
+     ]
+     df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
+     agg_dict = {"c": sum, "d": max, "e": "min"}
+     grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict)
+
+     mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
+
+     # By default, MultiIndex drops NA from levels when created via `from_*`,
+     # so we need to add the NA level back manually afterwards.
+     if not dropna:
+         mi = mi.set_levels(["A", "B", np.nan], level="b")
+     expected = pd.DataFrame(outputs, index=mi)
+
+     tm.assert_frame_equal(grouped, expected)
+
+
+ @pytest.mark.arm_slow
+ @pytest.mark.parametrize(
+     "datetime1, datetime2",
+     [
+         (pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")),
+         (pd.Timedelta("-2 days"), pd.Timedelta("-1 days")),
+         (pd.Period("2020-01-01"), pd.Period("2020-02-01")),
+     ],
+ )
+ @pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])])
+ def test_groupby_dropna_datetime_like_data(
+     dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2
+ ):
+     # GH 3729
+     df = pd.DataFrame(
+         {
+             "values": [1, 2, 3, 4, 5, 6],
+             "dt": [
+                 datetime1,
+                 unique_nulls_fixture,
+                 datetime2,
+                 unique_nulls_fixture2,
+                 datetime1,
+                 datetime1,
+             ],
+         }
+     )
+
+     if dropna:
+         indexes = [datetime1, datetime2]
+     else:
+         indexes = [datetime1, datetime2, np.nan]
+
+     grouped = df.groupby("dt", dropna=dropna).agg({"values": sum})
+     expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))
+
+     tm.assert_frame_equal(grouped, expected)
+
+
+ @pytest.mark.parametrize(
+     "dropna, data, selected_data, levels",
+     [
+         pytest.param(
+             False,
+             {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},
+             {"values": [0, 1, 0, 0]},
+             ["a", "b", np.nan],
+             id="dropna_false_has_nan",
+         ),
+         pytest.param(
+             True,
+             {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},
+             {"values": [0, 1, 0]},
+             None,
+             id="dropna_true_has_nan",
+         ),
+         pytest.param(
+             # no nan in "groups"; dropna=True|False should be same.
+             False,
+             {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},
+             {"values": [0, 1, 0, 0]},
+             None,
+             id="dropna_false_no_nan",
+         ),
+         pytest.param(
+             # no nan in "groups"; dropna=True|False should be same.
+             True,
+             {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},
+             {"values": [0, 1, 0, 0]},
+             None,
+             id="dropna_true_no_nan",
+         ),
+     ],
+ )
+ def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, levels):
+     # GH 35889
+
+     df = pd.DataFrame(data)
+     gb = df.groupby("groups", dropna=dropna)
+     result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))}))
+
+     mi_tuples = tuple(zip(data["groups"], selected_data["values"]))
+     mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None])
+     # By default, MultiIndex drops NA from levels when created via `from_*`,
+     # so we need to add the NA level back manually afterwards.
+     if not dropna and levels:
+         mi = mi.set_levels(levels, level="groups")
+
+     expected = pd.DataFrame(selected_data, index=mi)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("input_index", [None, ["a"], ["a", "b"]])
+ @pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
+ @pytest.mark.parametrize("series", [True, False])
+ def test_groupby_dropna_with_multiindex_input(input_index, keys, series):
+     # GH#46783
+     obj = pd.DataFrame(
+         {
+             "a": [1, np.nan],
+             "b": [1, 1],
+             "c": [2, 3],
+         }
+     )
+
+     expected = obj.set_index(keys)
+     if series:
+         expected = expected["c"]
+     elif input_index == ["a", "b"] and keys == ["a"]:
+         # Column b should not be aggregated
+         expected = expected[["c"]]
+
+     if input_index is not None:
+         obj = obj.set_index(input_index)
+     gb = obj.groupby(keys, dropna=False)
+     if series:
+         gb = gb["c"]
+     result = gb.sum()
+
+     tm.assert_equal(result, expected)
+
+
+ def test_groupby_nan_included():
+     # GH 35646
+     data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}
+     df = pd.DataFrame(data)
+     grouped = df.groupby("group", dropna=False)
+     result = grouped.indices
+     dtype = np.intp
+     expected = {
+         "g1": np.array([0, 2], dtype=dtype),
+         "g2": np.array([3], dtype=dtype),
+         np.nan: np.array([1, 4], dtype=dtype),
+     }
+     for result_values, expected_values in zip(result.values(), expected.values()):
+         tm.assert_numpy_array_equal(result_values, expected_values)
+     assert np.isnan(list(result.keys())[2])
+     assert list(result.keys())[0:2] == ["g1", "g2"]
+
+
+ def test_groupby_drop_nan_with_multi_index():
+     # GH 39895
+     df = pd.DataFrame([[np.nan, 0, 1]], columns=["a", "b", "c"])
+     df = df.set_index(["a", "b"])
+     result = df.groupby(["a", "b"], dropna=False).first()
+     expected = df
+     tm.assert_frame_equal(result, expected)
+
+
+ # sequence_index enumerates all strings made up of x, y, z of length 4
+ @pytest.mark.parametrize("sequence_index", range(3**4))
+ @pytest.mark.parametrize(
+     "dtype",
+     [
+         None,
+         "UInt8",
+         "Int8",
+         "UInt16",
+         "Int16",
+         "UInt32",
+         "Int32",
+         "UInt64",
+         "Int64",
+         "Float32",
+         "Int64",
+         "Float64",
+         "category",
+         "string",
+         pytest.param(
+             "string[pyarrow]",
+             marks=pytest.mark.skipif(
+                 pa_version_under7p0, reason="pyarrow is not installed"
+             ),
+         ),
+         "datetime64[ns]",
+         "period[d]",
+         "Sparse[float]",
+     ],
+ )
+ @pytest.mark.parametrize("test_series", [True, False])
+ def test_no_sort_keep_na(sequence_index, dtype, test_series, as_index):
+     # GH#46584, GH#48794
+
+     # Convert sequence_index into a string sequence, e.g. 5 becomes "zyxx"
+     # (its base-3 digits, least significant first, are 2, 1, 0, 0).
+     # This sequence is used for the grouper.
+     sequence = "".join(
+         [{0: "x", 1: "y", 2: "z"}[sequence_index // (3**k) % 3] for k in range(4)]
+     )
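+
+     # Hedged worked example (illustrative addition): index 7 has base-3
+     # digits 1, 2, 0, 0 (least significant first) and decodes to "yzxx";
+     # index 80 (2222 in base 3) decodes to "zzzz".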
+
+     # Unique values to use for grouper, depends on dtype
+     if dtype in ("string", "string[pyarrow]"):
+         uniques = {"x": "x", "y": "y", "z": pd.NA}
+     elif dtype in ("datetime64[ns]", "period[d]"):
+         uniques = {"x": "2016-01-01", "y": "2017-01-01", "z": pd.NA}
+     else:
+         uniques = {"x": 1, "y": 2, "z": np.nan}
+
+     df = pd.DataFrame(
+         {
+             "key": pd.Series([uniques[label] for label in sequence], dtype=dtype),
+             "a": [0, 1, 2, 3],
+         }
+     )
+     gb = df.groupby("key", dropna=False, sort=False, as_index=as_index)
+     if test_series:
+         gb = gb["a"]
+     result = gb.sum()
+
+     # Manually compute the groupby sum, use the labels "x", "y", and "z" to avoid
+     # issues with hashing np.nan
+     summed = {}
+     for idx, label in enumerate(sequence):
+         summed[label] = summed.get(label, 0) + idx
+     if dtype == "category":
+         index = pd.CategoricalIndex(
+             [uniques[e] for e in summed],
+             df["key"].cat.categories,
+             name="key",
+         )
+     elif isinstance(dtype, str) and dtype.startswith("Sparse"):
+         index = pd.Index(
+             pd.array([uniques[label] for label in summed], dtype=dtype), name="key"
+         )
+     else:
+         index = pd.Index([uniques[label] for label in summed], dtype=dtype, name="key")
+     expected = pd.Series(summed.values(), index=index, name="a", dtype=None)
+     if not test_series:
+         expected = expected.to_frame()
+     if not as_index:
+         expected = expected.reset_index()
+         if dtype is not None and dtype.startswith("Sparse"):
+             expected["key"] = expected["key"].astype(dtype)
+
+     tm.assert_equal(result, expected)
+
+
+ @pytest.mark.parametrize("test_series", [True, False])
+ @pytest.mark.parametrize("dtype", [object, None])
+ def test_null_is_null_for_dtype(
+     sort, dtype, nulls_fixture, nulls_fixture2, test_series
+ ):
+     # GH#48506 - null group keys should always be represented by the
+     # dtype's own null value
+     df = pd.DataFrame({"a": [1, 2]})
+     groups = pd.Series([nulls_fixture, nulls_fixture2], dtype=dtype)
+     obj = df["a"] if test_series else df
+     gb = obj.groupby(groups, dropna=False, sort=sort)
+     result = gb.sum()
+     index = pd.Index([na_value_for_dtype(groups.dtype)])
+     expected = pd.DataFrame({"a": [3]}, index=index)
+     if test_series:
+         tm.assert_series_equal(result, expected["a"])
+     else:
+         tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
+ def test_categorical_reducers(
+     request, reduction_func, observed, sort, as_index, index_kind
+ ):
+     # GH#36327
+     if (
+         reduction_func in ("idxmin", "idxmax")
+         and not observed
+         and index_kind != "multi"
+     ):
+         msg = "GH#10694 - idxmin/max broken for categorical with observed=False"
+         request.node.add_marker(pytest.mark.xfail(reason=msg))
+
+     # Ensure there is at least one null value by appending to the end
+     values = np.append(np.random.choice([1, 2, None], size=19), None)
+     df = pd.DataFrame(
+         {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}
+     )
+
+     # Strategy: Compare to dropna=True by filling null values with a new code
+     df_filled = df.copy()
+     df_filled["x"] = pd.Categorical(values, categories=[1, 2, 3, 4]).fillna(4)
+
+     if index_kind == "range":
+         keys = ["x"]
+     elif index_kind == "single":
+         keys = ["x"]
+         df = df.set_index("x")
+         df_filled = df_filled.set_index("x")
+     else:
+         keys = ["x", "x2"]
+         df["x2"] = df["x"]
+         df = df.set_index(["x", "x2"])
+         df_filled["x2"] = df_filled["x"]
+         df_filled = df_filled.set_index(["x", "x2"])
+     args = get_groupby_method_args(reduction_func, df)
+     args_filled = get_groupby_method_args(reduction_func, df_filled)
+     if reduction_func == "corrwith" and index_kind == "range":
+         # Don't include the grouping columns so we can call reset_index
+         args = (args[0].drop(columns=keys),)
+         args_filled = (args_filled[0].drop(columns=keys),)
+
+     gb_filled = df_filled.groupby(keys, observed=observed, sort=sort, as_index=True)
+     expected = getattr(gb_filled, reduction_func)(*args_filled).reset_index()
+     expected["x"] = expected["x"].replace(4, None)
+     if index_kind == "multi":
+         expected["x2"] = expected["x2"].replace(4, None)
+     if as_index:
+         if index_kind == "multi":
+             expected = expected.set_index(["x", "x2"])
+         else:
+             expected = expected.set_index("x")
+     else:
+         if index_kind != "range" and reduction_func != "size":
+             # size, unlike other methods, has the desired behavior in GH#49519
+             expected = expected.drop(columns="x")
+             if index_kind == "multi":
+                 expected = expected.drop(columns="x2")
+     if reduction_func in ("idxmax", "idxmin") and index_kind != "range":
+         # expected was computed with a RangeIndex; need to translate to index values
+         values = expected["y"].values.tolist()
+         if index_kind == "single":
+             values = [np.nan if e == 4 else e for e in values]
+         else:
+             values = [(np.nan, np.nan) if e == (4, 4) else e for e in values]
+         expected["y"] = values
+     if reduction_func == "size":
+         # size, unlike other methods, has the desired behavior in GH#49519
+         expected = expected.rename(columns={0: "size"})
+         if as_index:
+             expected = expected["size"].rename(None)
+
+     gb_keepna = df.groupby(
+         keys, dropna=False, observed=observed, sort=sort, as_index=as_index
+     )
+     result = getattr(gb_keepna, reduction_func)(*args)
+
+     # size will return a Series, others are DataFrame
+     tm.assert_equal(result, expected)
+
+
+ def test_categorical_transformers(
+     request, transformation_func, observed, sort, as_index
+ ):
+     # GH#36327
+     if transformation_func == "fillna":
+         msg = "GH#49651 fillna may incorrectly reorder results when dropna=False"
+         request.node.add_marker(pytest.mark.xfail(reason=msg, strict=False))
+
+     values = np.append(np.random.choice([1, 2, None], size=19), None)
+     df = pd.DataFrame(
+         {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}
+     )
+     args = get_groupby_method_args(transformation_func, df)
+
+     # Compute result for null group
+     null_group_values = df[df["x"].isnull()]["y"]
+     if transformation_func == "cumcount":
+         null_group_data = list(range(len(null_group_values)))
+     elif transformation_func == "ngroup":
+         if sort:
+             if observed:
+                 na_group = df["x"].nunique(dropna=False) - 1
+             else:
+                 # TODO: Should this be 3?
+                 na_group = df["x"].nunique(dropna=False) - 1
+         else:
+             na_group = df.iloc[: null_group_values.index[0]]["x"].nunique()
+         null_group_data = len(null_group_values) * [na_group]
+     else:
+         null_group_data = getattr(null_group_values, transformation_func)(*args)
+     null_group_result = pd.DataFrame({"y": null_group_data})
+
+     gb_keepna = df.groupby(
+         "x", dropna=False, observed=observed, sort=sort, as_index=as_index
+     )
+     gb_dropna = df.groupby("x", dropna=True, observed=observed, sort=sort)
+     result = getattr(gb_keepna, transformation_func)(*args)
+     expected = getattr(gb_dropna, transformation_func)(*args)
+     for iloc, value in zip(
+         df[df["x"].isnull()].index.tolist(), null_group_result.values.ravel()
+     ):
+         if expected.ndim == 1:
+             expected.iloc[iloc] = value
+         else:
+             expected.iloc[iloc, 0] = value
+     if transformation_func == "ngroup":
+         expected[df["x"].notnull() & expected.ge(na_group)] += 1
+     if transformation_func not in ("rank", "diff", "pct_change", "shift"):
+         expected = expected.astype("int64")
+
+     tm.assert_equal(result, expected)
+
+
+ @pytest.mark.parametrize("method", ["head", "tail"])
+ def test_categorical_head_tail(method, observed, sort, as_index):
+     # GH#36327
+     values = np.random.choice([1, 2, None], 30)
+     df = pd.DataFrame(
+         {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}
+     )
+     gb = df.groupby("x", dropna=False, observed=observed, sort=sort, as_index=as_index)
+     result = getattr(gb, method)()
+
+     if method == "tail":
+         values = values[::-1]
+     # Take the top 5 values from each group
+     mask = (
+         ((values == 1) & ((values == 1).cumsum() <= 5))
+         | ((values == 2) & ((values == 2).cumsum() <= 5))
+         # flake8 doesn't like the vectorized check for None, thinks we should use `is`
+         | ((values == None) & ((values == None).cumsum() <= 5))  # noqa: E711
+     )
+     if method == "tail":
+         mask = mask[::-1]
+     expected = df[mask]
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_categorical_agg():
+     # GH#36327
+     values = np.random.choice([1, 2, None], 30)
+     df = pd.DataFrame(
+         {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}
+     )
+     gb = df.groupby("x", dropna=False)
+     result = gb.agg(lambda x: x.sum())
+     expected = gb.sum()
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_categorical_transform():
+     # GH#36327
+     values = np.random.choice([1, 2, None], 30)
+     df = pd.DataFrame(
+         {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}
+     )
+     gb = df.groupby("x", dropna=False)
+     result = gb.transform(lambda x: x.sum())
+     expected = gb.transform("sum")
+     tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_shift_diff.py ADDED
@@ -0,0 +1,156 @@
+ import numpy as np
+ import pytest
+
+ from pandas import (
+     DataFrame,
+     NaT,
+     Series,
+     Timedelta,
+     Timestamp,
+ )
+ import pandas._testing as tm
+
+
+ def test_group_shift_with_null_key():
+     # This test is designed to replicate the segfault in issue #13813.
+     n_rows = 1200
+
+     # Generate a moderately large dataframe with occasional missing
+     # values in column `B`, and then group by [`A`, `B`]. This should
+     # force `-1` in the `labels` array of `g.grouper.group_info` exactly
+     # at those places where the group-by key is partially missing.
+     df = DataFrame(
+         [(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
+         dtype=float,
+         columns=["A", "B", "Z"],
+         index=None,
+     )
+     g = df.groupby(["A", "B"])
+
+     expected = DataFrame(
+         [(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
+         dtype=float,
+         columns=["Z"],
+         index=None,
+     )
+     result = g.shift(-1)
+
+     tm.assert_frame_equal(result, expected)
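+
+
+ # Hedged sketch (illustrative, not upstream code): rows whose group key is
+ # partially missing are coded as -1 by the grouper, which is the condition
+ # the segfault test above exercises. `group_info` is a pandas 2.x internal.
+ def _demo_null_key_codes_sketch():
+     df = DataFrame({"A": [0.0, 0.0], "B": [1.0, np.nan], "Z": [10.0, 20.0]})
+     codes, _, _ = df.groupby(["A", "B"]).grouper.group_info
+     return codes  # expected: array([0, -1]) under the default dropna=True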
+
+
+ def test_group_shift_with_fill_value():
+     # GH #24128
+     n_rows = 24
+     df = DataFrame(
+         [(i % 12, i % 3, i) for i in range(n_rows)],
+         dtype=float,
+         columns=["A", "B", "Z"],
+         index=None,
+     )
+     g = df.groupby(["A", "B"])
+
+     expected = DataFrame(
+         [(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
+         dtype=float,
+         columns=["Z"],
+         index=None,
+     )
+     result = g.shift(-1, fill_value=0)
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_group_shift_lose_timezone():
+     # GH 30134
+     now_dt = Timestamp.utcnow()
+     df = DataFrame({"a": [1, 1], "date": now_dt})
+     result = df.groupby("a").shift(0).iloc[0]
+     expected = Series({"date": now_dt}, name=result.name)
+     tm.assert_series_equal(result, expected)
+
+
+ def test_group_diff_real_series(any_real_numpy_dtype):
+     df = DataFrame(
+         {"a": [1, 2, 3, 3, 2], "b": [1, 2, 3, 4, 5]},
+         dtype=any_real_numpy_dtype,
+     )
+     result = df.groupby("a")["b"].diff()
+     exp_dtype = "float"
+     if any_real_numpy_dtype in ["int8", "int16", "float32"]:
+         exp_dtype = "float32"
+     expected = Series([np.nan, np.nan, np.nan, 1.0, 3.0], dtype=exp_dtype, name="b")
+     tm.assert_series_equal(result, expected)
+
+
+ def test_group_diff_real_frame(any_real_numpy_dtype):
+     df = DataFrame(
+         {
+             "a": [1, 2, 3, 3, 2],
+             "b": [1, 2, 3, 4, 5],
+             "c": [1, 2, 3, 4, 6],
+         },
+         dtype=any_real_numpy_dtype,
+     )
+     result = df.groupby("a").diff()
+     exp_dtype = "float"
+     if any_real_numpy_dtype in ["int8", "int16", "float32"]:
+         exp_dtype = "float32"
+     expected = DataFrame(
+         {
+             "b": [np.nan, np.nan, np.nan, 1.0, 3.0],
+             "c": [np.nan, np.nan, np.nan, 1.0, 4.0],
+         },
+         dtype=exp_dtype,
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "data",
+     [
+         [
+             Timestamp("2013-01-01"),
+             Timestamp("2013-01-02"),
+             Timestamp("2013-01-03"),
+         ],
+         [Timedelta("5 days"), Timedelta("6 days"), Timedelta("7 days")],
+     ],
+ )
+ def test_group_diff_datetimelike(data):
+     df = DataFrame({"a": [1, 2, 2], "b": data})
+     result = df.groupby("a")["b"].diff()
+     expected = Series([NaT, NaT, Timedelta("1 days")], name="b")
+     tm.assert_series_equal(result, expected)
+
+
+ def test_group_diff_bool():
+     df = DataFrame({"a": [1, 2, 3, 3, 2], "b": [True, True, False, False, True]})
+     result = df.groupby("a")["b"].diff()
+     expected = Series([np.nan, np.nan, np.nan, False, False], name="b")
+     tm.assert_series_equal(result, expected)
+
+
+ def test_group_diff_object_raises(object_dtype):
+     df = DataFrame(
+         {"a": ["foo", "bar", "bar"], "b": ["baz", "foo", "foo"]}, dtype=object_dtype
+     )
+     with pytest.raises(TypeError, match=r"unsupported operand type\(s\) for -"):
+         df.groupby("a")["b"].diff()
+
+
+ def test_empty_shift_with_fill():
+     # GH 41264, single-index check
+     df = DataFrame(columns=["a", "b", "c"])
+     shifted = df.groupby(["a"]).shift(1)
+     shifted_with_fill = df.groupby(["a"]).shift(1, fill_value=0)
+     tm.assert_frame_equal(shifted, shifted_with_fill)
+     tm.assert_index_equal(shifted.index, shifted_with_fill.index)
+
+
+ def test_multindex_empty_shift_with_fill():
+     # GH 41264, multi-index check
+     df = DataFrame(columns=["a", "b", "c"])
+     shifted = df.groupby(["a", "b"]).shift(1)
+     shifted_with_fill = df.groupby(["a", "b"]).shift(1, fill_value=0)
+     tm.assert_frame_equal(shifted, shifted_with_fill)
+     tm.assert_index_equal(shifted.index, shifted_with_fill.index)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_subclass.py ADDED
@@ -0,0 +1,105 @@
+ from datetime import datetime
+
+ import numpy as np
+ import pytest
+
+ from pandas import (
+     DataFrame,
+     Index,
+     Series,
+ )
+ import pandas._testing as tm
+ from pandas.tests.groupby import get_groupby_method_args
+
+
+ @pytest.mark.parametrize(
+     "obj",
+     [
+         tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
+         tm.SubclassedSeries(np.arange(0, 10), name="A"),
+     ],
+ )
+ def test_groupby_preserves_subclass(obj, groupby_func):
+     # GH28330 -- preserve subclass through groupby operations
+
+     if isinstance(obj, Series) and groupby_func in {"corrwith"}:
+         pytest.skip(f"Not applicable for Series and {groupby_func}")
+
+     grouped = obj.groupby(np.arange(0, 10))
+
+     # Groups should preserve subclass type
+     assert isinstance(grouped.get_group(0), type(obj))
+
+     args = get_groupby_method_args(groupby_func, obj)
+
+     result1 = getattr(grouped, groupby_func)(*args)
+     result2 = grouped.agg(groupby_func, *args)
+
+     # Reduction or transformation kernels should preserve type
+     slices = {"ngroup", "cumcount", "size"}
+     if isinstance(obj, DataFrame) and groupby_func in slices:
+         assert isinstance(result1, tm.SubclassedSeries)
+     else:
+         assert isinstance(result1, type(obj))
+
+     # Confirm .agg() groupby operations return same results
+     if isinstance(result1, DataFrame):
+         tm.assert_frame_equal(result1, result2)
+     else:
+         tm.assert_series_equal(result1, result2)
+
+
+ def test_groupby_preserves_metadata():
+     # GH-37343
+     custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]})
+     assert "testattr" in custom_df._metadata
+     custom_df.testattr = "hello"
+     for _, group_df in custom_df.groupby("c"):
+         assert group_df.testattr == "hello"
+
+     # GH-45314
+     def func(group):
+         assert isinstance(group, tm.SubclassedDataFrame)
+         assert hasattr(group, "testattr")
+         return group.testattr
+
+     result = custom_df.groupby("c").apply(func)
+     expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c"))
+     tm.assert_series_equal(result, expected)
+
+     def func2(group):
+         assert isinstance(group, tm.SubclassedSeries)
+         assert hasattr(group, "testattr")
+         return group.testattr
+
+     custom_series = tm.SubclassedSeries([1, 2, 3])
+     custom_series.testattr = "hello"
+     result = custom_series.groupby(custom_df["c"]).apply(func2)
+     tm.assert_series_equal(result, expected)
+     result = custom_series.groupby(custom_df["c"]).agg(func2)
+     tm.assert_series_equal(result, expected)
+
+
+ @pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame])
+ def test_groupby_resample_preserves_subclass(obj):
+     # GH28330 -- preserve subclass through groupby.resample()
+
+     df = obj(
+         {
+             "Buyer": "Carl Carl Carl Carl Joe Carl".split(),
+             "Quantity": [18, 3, 5, 1, 9, 3],
+             "Date": [
+                 datetime(2013, 9, 1, 13, 0),
+                 datetime(2013, 9, 1, 13, 5),
+                 datetime(2013, 10, 1, 20, 0),
+                 datetime(2013, 10, 3, 10, 0),
+                 datetime(2013, 12, 2, 12, 0),
+                 datetime(2013, 9, 2, 14, 0),
+             ],
+         }
+     )
+     df = df.set_index("Date")
+
+     # Confirm groupby.resample() preserves dataframe type
+     result = df.groupby("Buyer").resample("5D").sum()
+     assert isinstance(result, obj)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_grouping.py ADDED
@@ -0,0 +1,1077 @@
+ """
+ test where we are determining what we are grouping, or getting groups
+ """
+ from datetime import (
+     date,
+     timedelta,
+ )
+
+ import numpy as np
+ import pytest
+
+ import pandas as pd
+ from pandas import (
+     CategoricalIndex,
+     DataFrame,
+     Grouper,
+     Index,
+     MultiIndex,
+     Series,
+     Timestamp,
+     date_range,
+ )
+ import pandas._testing as tm
+ from pandas.core.groupby.grouper import Grouping
+
+ # selection
+ # --------------------------------
+
+
+ class TestSelection:
+     def test_select_bad_cols(self):
+         df = DataFrame([[1, 2]], columns=["A", "B"])
+         g = df.groupby("A")
+         with pytest.raises(KeyError, match="\"Columns not found: 'C'\""):
+             g[["C"]]
+
+         with pytest.raises(KeyError, match="^[^A]+$"):
+             # A should not be referenced as a bad column...
+             # will have to rethink regex if you change message!
+             g[["A", "C"]]
+
+     def test_groupby_duplicated_column_errormsg(self):
+         # GH7511
+         df = DataFrame(
+             columns=["A", "B", "A", "C"], data=[range(4), range(2, 6), range(0, 8, 2)]
+         )
+
+         msg = "Grouper for 'A' not 1-dimensional"
+         with pytest.raises(ValueError, match=msg):
+             df.groupby("A")
+         with pytest.raises(ValueError, match=msg):
+             df.groupby(["A", "B"])
+
+         grouped = df.groupby("B")
+         c = grouped.count()
+         assert c.columns.nlevels == 1
+         assert c.columns.size == 3
+
+     def test_column_select_via_attr(self, df):
+         result = df.groupby("A").C.sum()
+         expected = df.groupby("A")["C"].sum()
+         tm.assert_series_equal(result, expected)
+
+         df["mean"] = 1.5
+         result = df.groupby("A").mean(numeric_only=True)
+         expected = df.groupby("A")[["C", "D", "mean"]].agg(np.mean)
+         tm.assert_frame_equal(result, expected)
+
+     def test_getitem_list_of_columns(self):
+         df = DataFrame(
+             {
+                 "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+                 "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+                 "C": np.random.randn(8),
+                 "D": np.random.randn(8),
+                 "E": np.random.randn(8),
+             }
+         )
+
+         result = df.groupby("A")[["C", "D"]].mean()
+         result2 = df.groupby("A")[df.columns[2:4]].mean()
+
+         expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
+
+         tm.assert_frame_equal(result, expected)
+         tm.assert_frame_equal(result2, expected)
+
+     def test_getitem_numeric_column_names(self):
+         # GH #13731
+         df = DataFrame(
+             {
+                 0: list("abcd") * 2,
+                 2: np.random.randn(8),
+                 4: np.random.randn(8),
+                 6: np.random.randn(8),
+             }
+         )
+         result = df.groupby(0)[df.columns[1:3]].mean()
+         result2 = df.groupby(0)[[2, 4]].mean()
+
+         expected = df.loc[:, [0, 2, 4]].groupby(0).mean()
+
+         tm.assert_frame_equal(result, expected)
+         tm.assert_frame_equal(result2, expected)
+
+         # per GH 23566 enforced deprecation raises a ValueError
+         with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):
+             df.groupby(0)[2, 4].mean()
+
+     def test_getitem_single_tuple_of_columns_raises(self, df):
+         # per GH 23566 enforced deprecation raises a ValueError
+         with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):
+             df.groupby("A")["C", "D"].mean()
+
+     def test_getitem_single_column(self):
+         df = DataFrame(
+             {
+                 "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+                 "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+                 "C": np.random.randn(8),
+                 "D": np.random.randn(8),
+                 "E": np.random.randn(8),
+             }
+         )
+
+         result = df.groupby("A")["C"].mean()
+
+         as_frame = df.loc[:, ["A", "C"]].groupby("A").mean()
+         as_series = as_frame.iloc[:, 0]
+         expected = as_series
+
+         tm.assert_series_equal(result, expected)
+
+     def test_indices_grouped_by_tuple_with_lambda(self):
+         # GH 36158
+         df = DataFrame(
+             {"Tuples": ((x, y) for x in [0, 1] for y in np.random.randint(3, 5, 5))}
+         )
+
+         gb = df.groupby("Tuples")
+         gb_lambda = df.groupby(lambda x: df.iloc[x, 0])
+
+         expected = gb.indices
+         result = gb_lambda.indices
+
+         tm.assert_dict_equal(result, expected)
+
+
+ # grouping
+ # --------------------------------
+
+
+ class TestGrouping:
+     @pytest.mark.parametrize(
+         "index",
+         [
+             tm.makeFloatIndex,
+             tm.makeStringIndex,
+             tm.makeIntIndex,
+             tm.makeDateIndex,
+             tm.makePeriodIndex,
+         ],
+     )
+     def test_grouper_index_types(self, index):
+         # related GH5375
+         # groupby misbehaving when using a float-like index
+         df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"))
+
+         df.index = index(len(df))
+         df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
+
+         df.index = list(reversed(df.index.tolist()))
+         df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
+
+     def test_grouper_multilevel_freq(self):
+         # GH 7885
+         # with level and freq specified in a Grouper
+         d0 = date.today() - timedelta(days=14)
+         dates = date_range(d0, date.today())
+         date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"])
+         df = DataFrame(np.random.randint(0, 100, 225), index=date_index)
+
+         # Check string level
+         expected = (
+             df.reset_index()
+             .groupby([Grouper(key="foo", freq="W"), Grouper(key="bar", freq="W")])
+             .sum()
+         )
+         # reset index changes columns dtype to object
+         expected.columns = Index([0], dtype="int64")
+
+         result = df.groupby(
+             [Grouper(level="foo", freq="W"), Grouper(level="bar", freq="W")]
+         ).sum()
+         tm.assert_frame_equal(result, expected)
+
+         # Check integer level
+         result = df.groupby(
+             [Grouper(level=0, freq="W"), Grouper(level=1, freq="W")]
+         ).sum()
+         tm.assert_frame_equal(result, expected)
+
+     def test_grouper_creation_bug(self):
+         # GH 8795
+         df = DataFrame({"A": [0, 0, 1, 1, 2, 2], "B": [1, 2, 3, 4, 5, 6]})
+         g = df.groupby("A")
+         expected = g.sum()
+
+         g = df.groupby(Grouper(key="A"))
+         result = g.sum()
+         tm.assert_frame_equal(result, expected)
+
+         g = df.groupby(Grouper(key="A", axis=0))
+         result = g.sum()
+         tm.assert_frame_equal(result, expected)
+
+         result = g.apply(lambda x: x.sum())
+         expected["A"] = [0, 2, 4]
+         expected = expected.loc[:, ["A", "B"]]
+         tm.assert_frame_equal(result, expected)
+
+         # GH14334
+         # Grouper(key=...) may be passed in a list
+         df = DataFrame(
+             {"A": [0, 0, 0, 1, 1, 1], "B": [1, 1, 2, 2, 3, 3], "C": [1, 2, 3, 4, 5, 6]}
+         )
+         # Group by single column
+         expected = df.groupby("A").sum()
+         g = df.groupby([Grouper(key="A")])
+         result = g.sum()
+         tm.assert_frame_equal(result, expected)
+
+         # Group by two columns
+         # using a combination of strings and Grouper objects
+         expected = df.groupby(["A", "B"]).sum()
+
+         # Group with two Grouper objects
+         g = df.groupby([Grouper(key="A"), Grouper(key="B")])
+         result = g.sum()
+         tm.assert_frame_equal(result, expected)
+
+         # Group with a string and a Grouper object
+         g = df.groupby(["A", Grouper(key="B")])
+         result = g.sum()
+         tm.assert_frame_equal(result, expected)
+
+         # Group with a Grouper object and a string
+         g = df.groupby([Grouper(key="A"), "B"])
+         result = g.sum()
+         tm.assert_frame_equal(result, expected)
+
+         # GH8866
+         s = Series(
+             np.arange(8, dtype="int64"),
+             index=MultiIndex.from_product(
+                 [list("ab"), range(2), date_range("20130101", periods=2)],
+                 names=["one", "two", "three"],
+             ),
+         )
+         result = s.groupby(Grouper(level="three", freq="M")).sum()
+         expected = Series(
+             [28],
+             index=pd.DatetimeIndex([Timestamp("2013-01-31")], freq="M", name="three"),
+         )
+         tm.assert_series_equal(result, expected)
+
+         # specifying only a level should behave like groupby(level=...)
+         result = s.groupby(Grouper(level="one")).sum()
+         expected = s.groupby(level="one").sum()
+         tm.assert_series_equal(result, expected)
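+
+         # Hedged equivalence check (illustrative addition, not upstream
+         # code): a bare column label groups the same as Grouper(key=label).
+         tm.assert_frame_equal(
+             df.groupby("A").sum(), df.groupby(Grouper(key="A")).sum()
+         )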
+
+     def test_grouper_column_and_index(self):
+         # GH 14327
+
+         # Grouping a multi-index frame by a column and an index level should
+         # be equivalent to resetting the index and grouping by two columns
+         idx = MultiIndex.from_tuples(
+             [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)]
+         )
+         idx.names = ["outer", "inner"]
+         df_multi = DataFrame(
+             {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]},
+             index=idx,
+         )
+         result = df_multi.groupby(["B", Grouper(level="inner")]).mean(numeric_only=True)
+         expected = (
+             df_multi.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)
+         )
+         tm.assert_frame_equal(result, expected)
+
+         # Test the reverse grouping order
+         result = df_multi.groupby([Grouper(level="inner"), "B"]).mean(numeric_only=True)
+         expected = (
+             df_multi.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)
+         )
+         tm.assert_frame_equal(result, expected)
+
+         # Grouping a single-index frame by a column and the index should
+         # be equivalent to resetting the index and grouping by two columns
+         df_single = df_multi.reset_index("outer")
+         result = df_single.groupby(["B", Grouper(level="inner")]).mean(
+             numeric_only=True
+         )
+         expected = (
+             df_single.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)
+         )
+         tm.assert_frame_equal(result, expected)
+
+         # Test the reverse grouping order
+         result = df_single.groupby([Grouper(level="inner"), "B"]).mean(
+             numeric_only=True
+         )
+         expected = (
+             df_single.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)
+         )
+         tm.assert_frame_equal(result, expected)
+
+     def test_groupby_levels_and_columns(self):
+         # GH9344, GH9049
+         idx_names = ["x", "y"]
+         idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)
+         df = DataFrame(np.arange(12).reshape(-1, 3), index=idx)
+
+         by_levels = df.groupby(level=idx_names).mean()
+         # reset_index changes columns dtype to object
+         by_columns = df.reset_index().groupby(idx_names).mean()
+
+         # without casting, by_columns.columns is object-dtype
+         by_columns.columns = by_columns.columns.astype(np.int64)
+         tm.assert_frame_equal(by_levels, by_columns)
+
+     def test_groupby_categorical_index_and_columns(self, observed):
+         # GH18432, adapted for GH25871
+         columns = ["A", "B", "A", "B"]
+         categories = ["B", "A"]
+         data = np.array(
+             [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int
+         )
+         cat_columns = CategoricalIndex(columns, categories=categories, ordered=True)
+         df = DataFrame(data=data, columns=cat_columns)
+         result = df.groupby(axis=1, level=0, observed=observed).sum()
+         expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int)
+         expected_columns = CategoricalIndex(
+             categories, categories=categories, ordered=True
+         )
+         expected = DataFrame(data=expected_data, columns=expected_columns)
+         tm.assert_frame_equal(result, expected)
+
+         # test transposed version
+         df = DataFrame(data.T, index=cat_columns)
+         result = df.groupby(axis=0, level=0, observed=observed).sum()
+         expected = DataFrame(data=expected_data.T, index=expected_columns)
+         tm.assert_frame_equal(result, expected)
+
+     def test_grouper_getting_correct_binner(self):
+         # GH 10063
+         # using a non-time-based grouper and a time-based grouper
+         # and specifying levels
+         df = DataFrame(
+             {"A": 1},
+             index=MultiIndex.from_product(
+                 [list("ab"), date_range("20130101", periods=80)], names=["one", "two"]
+             ),
+         )
+         result = df.groupby(
+             [Grouper(level="one"), Grouper(level="two", freq="M")]
+         ).sum()
+         expected = DataFrame(
+             {"A": [31, 28, 21, 31, 28, 21]},
+             index=MultiIndex.from_product(
+                 [list("ab"), date_range("20130101", freq="M", periods=3)],
+                 names=["one", "two"],
+             ),
+         )
+         tm.assert_frame_equal(result, expected)
+
+     def test_grouper_iter(self, df):
+         assert sorted(df.groupby("A").grouper) == ["bar", "foo"]
+
+     def test_empty_groups(self, df):
+         # see gh-1048
+         with pytest.raises(ValueError, match="No group keys passed!"):
+             df.groupby([])
+
+     def test_groupby_grouper(self, df):
+         grouped = df.groupby("A")
+
+         result = df.groupby(grouped.grouper).mean(numeric_only=True)
+         expected = grouped.mean(numeric_only=True)
+         tm.assert_frame_equal(result, expected)
+
+     def test_groupby_dict_mapping(self):
+         # GH #679
+         s = Series({"T1": 5})
+         result = s.groupby({"T1": "T2"}).agg(sum)
+         expected = s.groupby(["T2"]).agg(sum)
+         tm.assert_series_equal(result, expected)
+
+         s = Series([1.0, 2.0, 3.0, 4.0], index=list("abcd"))
+         mapping = {"a": 0, "b": 0, "c": 1, "d": 1}
+
+         result = s.groupby(mapping).mean()
+         result2 = s.groupby(mapping).agg(np.mean)
+         exp_key = np.array([0, 0, 1, 1], dtype=np.int64)
+         expected = s.groupby(exp_key).mean()
+         expected2 = s.groupby(exp_key).mean()
+         tm.assert_series_equal(result, expected)
+         tm.assert_series_equal(result, result2)
+         tm.assert_series_equal(result, expected2)
+
+     @pytest.mark.parametrize(
+         "index",
+         [
+             [0, 1, 2, 3],
+             ["a", "b", "c", "d"],
+             [Timestamp(2021, 7, 28 + i) for i in range(4)],
+         ],
+     )
+     def test_groupby_series_named_with_tuple(self, frame_or_series, index):
+         # GH 42731
+         obj = frame_or_series([1, 2, 3, 4], index=index)
+         groups = Series([1, 0, 1, 0], index=index, name=("a", "a"))
+         result = obj.groupby(groups).last()
+         expected = frame_or_series([4, 3])
+         expected.index.name = ("a", "a")
+         tm.assert_equal(result, expected)
+
+     def test_groupby_grouper_f_sanity_checked(self):
+         dates = date_range("01-Jan-2013", periods=12, freq="MS")
+         ts = Series(np.random.randn(12), index=dates)
+
+         # GH3035
+         # index.map is used to apply grouper to the index
+         # if it fails on the elements, map tries it on the entire index as
+         # a sequence. That can yield invalid results that cause trouble
+         # down the line.
+         # the surprise comes from using key[0:6] rather than str(key)[0:6]
+         # when the elements are Timestamp.
+         # the result is Index[0:6], very confusing.
+
+         msg = r"Grouper result violates len\(labels\) == len\(data\)"
+         with pytest.raises(AssertionError, match=msg):
+             ts.groupby(lambda key: key[0:6])
+
+     def test_grouping_error_on_multidim_input(self, df):
+         msg = "Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional"
+         with pytest.raises(ValueError, match=msg):
+             Grouping(df.index, df[["A", "A"]])
+
+     def test_multiindex_passthru(self):
+         # GH 7997
+         # regression from 0.14.1
+         df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+         df.columns = MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])
+
+         result = df.groupby(axis=1, level=[0, 1]).first()
+         tm.assert_frame_equal(result, df)
+
+     def test_multiindex_negative_level(self, mframe):
+         # GH 13901
+         result = mframe.groupby(level=-1).sum()
+         expected = mframe.groupby(level="second").sum()
+         tm.assert_frame_equal(result, expected)
+
+         result = mframe.groupby(level=-2).sum()
+         expected = mframe.groupby(level="first").sum()
+         tm.assert_frame_equal(result, expected)
+
+         result = mframe.groupby(level=[-2, -1]).sum()
+         expected = mframe.sort_index()
+         tm.assert_frame_equal(result, expected)
+
+         result = mframe.groupby(level=[-1, "first"]).sum()
+         expected = mframe.groupby(level=["second", "first"]).sum()
+         tm.assert_frame_equal(result, expected)
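+
+         # Hedged note (illustrative addition): negative levels mirror Python
+         # indexing, so for this two-level index level=-1 resolves to "second"
+         # and level=-2 to "first".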
476
+
477
+ def test_multifunc_select_col_integer_cols(self, df):
478
+ df.columns = np.arange(len(df.columns))
479
+
480
+ # it works!
481
+ df.groupby(1, as_index=False)[2].agg({"Q": np.mean})
482
+
483
+ def test_multiindex_columns_empty_level(self):
484
+ lst = [["count", "values"], ["to filter", ""]]
485
+ midx = MultiIndex.from_tuples(lst)
486
+
487
+ df = DataFrame([[1, "A"]], columns=midx)
488
+
489
+ grouped = df.groupby("to filter").groups
490
+ assert grouped["A"] == [0]
491
+
492
+ grouped = df.groupby([("to filter", "")]).groups
493
+ assert grouped["A"] == [0]
494
+
495
+ df = DataFrame([[1, "A"], [2, "B"]], columns=midx)
496
+
497
+ expected = df.groupby("to filter").groups
498
+ result = df.groupby([("to filter", "")]).groups
499
+ assert result == expected
500
+
501
+ df = DataFrame([[1, "A"], [2, "A"]], columns=midx)
502
+
503
+ expected = df.groupby("to filter").groups
504
+ result = df.groupby([("to filter", "")]).groups
505
+ tm.assert_dict_equal(result, expected)
506
+
507
+ def test_groupby_multiindex_tuple(self):
508
+ # GH 17979
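+        # a tuple key is treated as a single MultiIndex column label,
+        # not as a list of separate grouping keys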
509
+ df = DataFrame(
510
+ [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
511
+ columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),
512
+ )
513
+ expected = df.groupby([("b", 1)]).groups
514
+ result = df.groupby(("b", 1)).groups
515
+ tm.assert_dict_equal(expected, result)
516
+
517
+ df2 = DataFrame(
518
+ df.values,
519
+ columns=MultiIndex.from_arrays(
520
+ [["a", "b", "b", "c"], ["d", "d", "e", "e"]]
521
+ ),
522
+ )
523
+ expected = df2.groupby([("b", "d")]).groups
524
+ result = df.groupby(("b", 1)).groups
525
+ tm.assert_dict_equal(expected, result)
526
+
527
+ df3 = DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"])
528
+ expected = df3.groupby([("b", "d")]).groups
529
+ result = df.groupby(("b", 1)).groups
530
+ tm.assert_dict_equal(expected, result)
531
+
532
+ @pytest.mark.parametrize("sort", [True, False])
533
+ def test_groupby_level(self, sort, mframe, df):
534
+ # GH 17537
535
+ frame = mframe
536
+ deleveled = frame.reset_index()
537
+
538
+ result0 = frame.groupby(level=0, sort=sort).sum()
539
+ result1 = frame.groupby(level=1, sort=sort).sum()
540
+
541
+ expected0 = frame.groupby(deleveled["first"].values, sort=sort).sum()
542
+ expected1 = frame.groupby(deleveled["second"].values, sort=sort).sum()
543
+
544
+ expected0.index.name = "first"
545
+ expected1.index.name = "second"
546
+
547
+ assert result0.index.name == "first"
548
+ assert result1.index.name == "second"
549
+
550
+ tm.assert_frame_equal(result0, expected0)
551
+ tm.assert_frame_equal(result1, expected1)
552
+ assert result0.index.name == frame.index.names[0]
553
+ assert result1.index.name == frame.index.names[1]
554
+
555
+ # groupby level name
556
+ result0 = frame.groupby(level="first", sort=sort).sum()
557
+ result1 = frame.groupby(level="second", sort=sort).sum()
558
+ tm.assert_frame_equal(result0, expected0)
559
+ tm.assert_frame_equal(result1, expected1)
560
+
561
+ # axis=1
562
+
563
+ result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()
564
+ result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum()
565
+ tm.assert_frame_equal(result0, expected0.T)
566
+ tm.assert_frame_equal(result1, expected1.T)
567
+
568
+ # raise exception for non-MultiIndex
569
+ msg = "level > 0 or level < -1 only valid with MultiIndex"
570
+ with pytest.raises(ValueError, match=msg):
571
+ df.groupby(level=1)
572
+
573
+ def test_groupby_level_index_names(self, axis):
574
+ # GH4014 this used to raise ValueError since 'exp'>1 (in py2)
575
+ df = DataFrame({"exp": ["A"] * 3 + ["B"] * 3, "var1": range(6)}).set_index(
576
+ "exp"
577
+ )
578
+ if axis in (1, "columns"):
579
+ df = df.T
580
+ df.groupby(level="exp", axis=axis)
581
+ msg = f"level name foo is not the name of the {df._get_axis_name(axis)}"
582
+ with pytest.raises(ValueError, match=msg):
583
+ df.groupby(level="foo", axis=axis)
584
+
585
+ @pytest.mark.parametrize("sort", [True, False])
586
+ def test_groupby_level_with_nas(self, sort):
587
+ # GH 17537
588
+ index = MultiIndex(
589
+ levels=[[1, 0], [0, 1, 2, 3]],
590
+ codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
591
+ )
592
+
593
+ # factorizing doesn't confuse things
594
+ s = Series(np.arange(8.0), index=index)
595
+ result = s.groupby(level=0, sort=sort).sum()
596
+ expected = Series([6.0, 22.0], index=[0, 1])
597
+ tm.assert_series_equal(result, expected)
598
+
599
+ index = MultiIndex(
600
+ levels=[[1, 0], [0, 1, 2, 3]],
601
+ codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
602
+ )
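+        # the -1 in the level-0 codes marks a missing (NaN) label, so that
+        # row (value 4.0) is dropped and the label-1 group sums to 5 + 6 + 7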
603
+
604
+ # factorizing doesn't confuse things
605
+ s = Series(np.arange(8.0), index=index)
606
+ result = s.groupby(level=0, sort=sort).sum()
607
+ expected = Series([6.0, 18.0], index=[0.0, 1.0])
608
+ tm.assert_series_equal(result, expected)
609
+
610
+ def test_groupby_args(self, mframe):
611
+ # PR8618 and issue 8015
612
+ frame = mframe
613
+
614
+ msg = "You have to supply one of 'by' and 'level'"
615
+ with pytest.raises(TypeError, match=msg):
616
+ frame.groupby()
617
+
618
+ msg = "You have to supply one of 'by' and 'level'"
619
+ with pytest.raises(TypeError, match=msg):
620
+ frame.groupby(by=None, level=None)
621
+
622
+ @pytest.mark.parametrize(
623
+ "sort,labels",
624
+ [
625
+ [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
626
+ [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]],
627
+ ],
628
+ )
629
+ def test_level_preserve_order(self, sort, labels, mframe):
630
+ # GH 17537
631
+ grouped = mframe.groupby(level=0, sort=sort)
632
+ exp_labels = np.array(labels, np.intp)
633
+ tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels)
634
+
635
+ def test_grouping_labels(self, mframe):
636
+ grouped = mframe.groupby(mframe.index.get_level_values(0))
637
+ exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)
638
+ tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels)
639
+
640
+ def test_list_grouper_with_nat(self):
641
+ # GH 14715
642
+ df = DataFrame({"date": date_range("1/1/2011", periods=365, freq="D")})
643
+ df.iloc[-1] = pd.NaT
644
+ grouper = Grouper(key="date", freq="AS")
645
+
646
+ # Grouper in a list grouping
647
+ result = df.groupby([grouper])
648
+ expected = {Timestamp("2011-01-01"): Index(list(range(364)))}
649
+ tm.assert_dict_equal(result.groups, expected)
650
+
651
+ # Test case without a list
652
+ result = df.groupby(grouper)
653
+ expected = {Timestamp("2011-01-01"): 365}
654
+ tm.assert_dict_equal(result.groups, expected)
655
+
656
+ @pytest.mark.parametrize(
657
+ "func,expected",
658
+ [
659
+ (
660
+ "transform",
661
+ Series(name=2, dtype=np.float64),
662
+ ),
663
+ (
664
+ "agg",
665
+ Series(
666
+ name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)
667
+ ),
668
+ ),
669
+ (
670
+ "apply",
671
+ Series(
672
+ name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)
673
+ ),
674
+ ),
675
+ ],
676
+ )
677
+ def test_evaluate_with_empty_groups(self, func, expected):
678
+ # 26208
679
+ # test transform'ing empty groups
680
+ # (not testing other agg fns, because they return
681
+ # different index objects).
682
+ df = DataFrame({1: [], 2: []})
683
+ g = df.groupby(1, group_keys=False)
684
+ result = getattr(g[2], func)(lambda x: x)
685
+ tm.assert_series_equal(result, expected)
686
+
687
+ def test_groupby_empty(self):
688
+ # https://github.com/pandas-dev/pandas/issues/27190
689
+ s = Series([], name="name", dtype="float64")
690
+ gr = s.groupby([])
691
+
692
+ result = gr.mean()
693
+ expected = s.set_axis(Index([], dtype=np.intp))
694
+ tm.assert_series_equal(result, expected)
695
+
696
+ # check group properties
697
+ assert len(gr.grouper.groupings) == 1
698
+ tm.assert_numpy_array_equal(
699
+ gr.grouper.group_info[0], np.array([], dtype=np.dtype(np.intp))
700
+ )
701
+
702
+ tm.assert_numpy_array_equal(
703
+ gr.grouper.group_info[1], np.array([], dtype=np.dtype(np.intp))
704
+ )
705
+
706
+ assert gr.grouper.group_info[2] == 0
707
+
708
+ # check name
709
+ assert s.groupby(s).grouper.names == ["name"]
710
+
711
+ def test_groupby_level_index_value_all_na(self):
712
+ # issue 20519
713
+ df = DataFrame(
714
+ [["x", np.nan, 10], [None, np.nan, 20]], columns=["A", "B", "C"]
715
+ ).set_index(["A", "B"])
716
+ result = df.groupby(level=["A", "B"]).sum()
717
+ expected = DataFrame(
718
+ data=[],
719
+ index=MultiIndex(
720
+ levels=[Index(["x"], dtype="object"), Index([], dtype="float64")],
721
+ codes=[[], []],
722
+ names=["A", "B"],
723
+ ),
724
+ columns=["C"],
725
+ dtype="int64",
726
+ )
727
+ tm.assert_frame_equal(result, expected)
728
+
729
+ def test_groupby_multiindex_level_empty(self):
730
+ # https://github.com/pandas-dev/pandas/issues/31670
731
+ df = DataFrame(
732
+ [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"]
733
+ )
734
+ df = df.set_index(["id", "category"])
735
+ empty = df[df.value < 0]
736
+ result = empty.groupby("id").sum()
737
+ expected = DataFrame(
738
+ dtype="float64",
739
+ columns=["value"],
740
+ index=Index([], dtype=np.int64, name="id"),
741
+ )
742
+ tm.assert_frame_equal(result, expected)
743
+
744
+
745
+ # get_group
746
+ # --------------------------------
747
+
748
+
749
+ class TestGetGroup:
750
+ def test_get_group(self):
751
+ # GH 5267
752
+ # be datelike friendly
753
+ df = DataFrame(
754
+ {
755
+ "DATE": pd.to_datetime(
756
+ [
757
+ "10-Oct-2013",
758
+ "10-Oct-2013",
759
+ "10-Oct-2013",
760
+ "11-Oct-2013",
761
+ "11-Oct-2013",
762
+ "11-Oct-2013",
763
+ ]
764
+ ),
765
+ "label": ["foo", "foo", "bar", "foo", "foo", "bar"],
766
+ "VAL": [1, 2, 3, 4, 5, 6],
767
+ }
768
+ )
769
+
770
+ g = df.groupby("DATE")
771
+ key = list(g.groups)[0]
772
+ result1 = g.get_group(key)
773
+ result2 = g.get_group(Timestamp(key).to_pydatetime())
774
+ result3 = g.get_group(str(Timestamp(key)))
775
+ tm.assert_frame_equal(result1, result2)
776
+ tm.assert_frame_equal(result1, result3)
777
+
778
+ g = df.groupby(["DATE", "label"])
779
+
780
+ key = list(g.groups)[0]
781
+ result1 = g.get_group(key)
782
+ result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1]))
783
+ result3 = g.get_group((str(Timestamp(key[0])), key[1]))
784
+ tm.assert_frame_equal(result1, result2)
785
+ tm.assert_frame_equal(result1, result3)
786
+
787
+ # must pass a same-length tuple with multiple keys
788
+ msg = "must supply a tuple to get_group with multiple grouping keys"
789
+ with pytest.raises(ValueError, match=msg):
790
+ g.get_group("foo")
793
+ msg = "must supply a same-length tuple to get_group with multiple grouping keys"
794
+ with pytest.raises(ValueError, match=msg):
795
+ g.get_group(("foo", "bar", "baz"))
796
+
797
+ def test_get_group_empty_bins(self, observed):
798
+ d = DataFrame([3, 1, 7, 6])
799
+ bins = [0, 5, 10, 15]
800
+ g = d.groupby(pd.cut(d[0], bins), observed=observed)
801
+
802
+ # TODO: should probably allow a str of an Interval to work as well
803
+ # IOW '(0, 5]'
804
+ result = g.get_group(pd.Interval(0, 5))
805
+ expected = DataFrame([3, 1], index=[0, 1])
806
+ tm.assert_frame_equal(result, expected)
807
+
808
+ msg = r"Interval\(10, 15, closed='right'\)"
809
+ with pytest.raises(KeyError, match=msg):
810
+ g.get_group(pd.Interval(10, 15))
811
+
812
+ def test_get_group_grouped_by_tuple(self):
813
+ # GH 8121
814
+ df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]], index=["ids"]).T
815
+ gr = df.groupby("ids")
816
+ expected = DataFrame({"ids": [(1,), (1,)]}, index=[0, 2])
817
+ result = gr.get_group((1,))
818
+ tm.assert_frame_equal(result, expected)
819
+
820
+ dt = pd.to_datetime(["2010-01-01", "2010-01-02", "2010-01-01", "2010-01-02"])
821
+ df = DataFrame({"ids": [(x,) for x in dt]})
822
+ gr = df.groupby("ids")
823
+ result = gr.get_group(("2010-01-01",))
824
+ expected = DataFrame({"ids": [(dt[0],), (dt[0],)]}, index=[0, 2])
825
+ tm.assert_frame_equal(result, expected)
826
+
827
+ def test_get_group_grouped_by_tuple_with_lambda(self):
828
+ # GH 36158
829
+ df = DataFrame(
830
+ {"Tuples": ((x, y) for x in [0, 1] for y in np.random.randint(3, 5, 5))}
831
+ )
832
+
833
+ gb = df.groupby("Tuples")
834
+ gb_lambda = df.groupby(lambda x: df.iloc[x, 0])
835
+
836
+ expected = gb.get_group(list(gb.groups.keys())[0])
837
+ result = gb_lambda.get_group(list(gb_lambda.groups.keys())[0])
838
+
839
+ tm.assert_frame_equal(result, expected)
840
+
841
+ def test_groupby_with_empty(self):
842
+ index = pd.DatetimeIndex(())
843
+ data = ()
844
+ series = Series(data, index, dtype=object)
845
+ grouper = Grouper(freq="D")
846
+ grouped = series.groupby(grouper)
847
+ assert next(iter(grouped), None) is None
848
+
849
+ def test_groupby_with_single_column(self):
850
+ df = DataFrame({"a": list("abssbab")})
851
+ tm.assert_frame_equal(df.groupby("a").get_group("a"), df.iloc[[0, 5]])
852
+ # GH 13530
853
+ exp = DataFrame(index=Index(["a", "b", "s"], name="a"), columns=[])
854
+ tm.assert_frame_equal(df.groupby("a").count(), exp)
855
+ tm.assert_frame_equal(df.groupby("a").sum(), exp)
856
+
857
+ exp = df.iloc[[3, 4, 5]]
858
+ tm.assert_frame_equal(df.groupby("a").nth(1), exp)
859
+
860
+ def test_gb_key_len_equal_axis_len(self):
861
+ # GH16843
862
+ # test ensures that index and column keys are recognized correctly
863
+ # when number of keys equals axis length of groupby
864
+ df = DataFrame(
865
+ [["foo", "bar", "B", 1], ["foo", "bar", "B", 2], ["foo", "baz", "C", 3]],
866
+ columns=["first", "second", "third", "one"],
867
+ )
868
+ df = df.set_index(["first", "second"])
869
+ df = df.groupby(["first", "second", "third"]).size()
870
+ assert df.loc[("foo", "bar", "B")] == 2
871
+ assert df.loc[("foo", "baz", "C")] == 1
872
+
873
+
874
+ # groups & iteration
875
+ # --------------------------------
876
+
877
+
878
+ class TestIteration:
879
+ def test_groups(self, df):
880
+ grouped = df.groupby(["A"])
881
+ groups = grouped.groups
882
+ assert groups is grouped.groups # caching works
883
+
884
+ for k, v in grouped.groups.items():
885
+ assert (df.loc[v]["A"] == k).all()
886
+
887
+ grouped = df.groupby(["A", "B"])
888
+ groups = grouped.groups
889
+ assert groups is grouped.groups # caching works
890
+
891
+ for k, v in grouped.groups.items():
892
+ assert (df.loc[v]["A"] == k[0]).all()
893
+ assert (df.loc[v]["B"] == k[1]).all()
894
+
895
+ def test_grouping_is_iterable(self, tsframe):
896
+ # this code path isn't used anywhere else
897
+ # not sure it's useful
898
+ grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])
899
+
900
+ # test it works
901
+ for g in grouped.grouper.groupings[0]:
902
+ pass
903
+
904
+ def test_multi_iter(self):
905
+ s = Series(np.arange(6))
906
+ k1 = np.array(["a", "a", "a", "b", "b", "b"])
907
+ k2 = np.array(["1", "2", "1", "2", "1", "2"])
908
+
909
+ grouped = s.groupby([k1, k2])
910
+
911
+ iterated = list(grouped)
912
+ expected = [
913
+ ("a", "1", s[[0, 2]]),
914
+ ("a", "2", s[[1]]),
915
+ ("b", "1", s[[4]]),
916
+ ("b", "2", s[[3, 5]]),
917
+ ]
918
+ for i, ((one, two), three) in enumerate(iterated):
919
+ e1, e2, e3 = expected[i]
920
+ assert e1 == one
921
+ assert e2 == two
922
+ tm.assert_series_equal(three, e3)
923
+
924
+ def test_multi_iter_frame(self, three_group):
925
+ k1 = np.array(["b", "b", "b", "a", "a", "a"])
926
+ k2 = np.array(["1", "2", "1", "2", "1", "2"])
927
+ df = DataFrame(
928
+ {"v1": np.random.randn(6), "v2": np.random.randn(6), "k1": k1, "k2": k2},
929
+ index=["one", "two", "three", "four", "five", "six"],
930
+ )
931
+
932
+ grouped = df.groupby(["k1", "k2"])
933
+
934
+ # things get sorted!
935
+ iterated = list(grouped)
936
+ idx = df.index
937
+ expected = [
938
+ ("a", "1", df.loc[idx[[4]]]),
939
+ ("a", "2", df.loc[idx[[3, 5]]]),
940
+ ("b", "1", df.loc[idx[[0, 2]]]),
941
+ ("b", "2", df.loc[idx[[1]]]),
942
+ ]
943
+ for i, ((one, two), three) in enumerate(iterated):
944
+ e1, e2, e3 = expected[i]
945
+ assert e1 == one
946
+ assert e2 == two
947
+ tm.assert_frame_equal(three, e3)
948
+
949
+ # don't iterate through groups with no data
950
+ df["k1"] = np.array(["b", "b", "b", "a", "a", "a"])
951
+ df["k2"] = np.array(["1", "1", "1", "2", "2", "2"])
952
+ grouped = df.groupby(["k1", "k2"])
953
+ # calling `dict` on a DataFrameGroupBy leads to a TypeError,
954
+ # we need to use a dictionary comprehension here
955
+ # pylint: disable-next=unnecessary-comprehension
956
+ groups = {key: gp for key, gp in grouped}
957
+ assert len(groups) == 2
958
+
959
+ # axis = 1
960
+ three_levels = three_group.groupby(["A", "B", "C"]).mean()
961
+ grouped = three_levels.T.groupby(axis=1, level=(1, 2))
962
+ for key, group in grouped:
963
+ pass
964
+
965
+ def test_dictify(self, df):
966
+ dict(iter(df.groupby("A")))
967
+ dict(iter(df.groupby(["A", "B"])))
968
+ dict(iter(df["C"].groupby(df["A"])))
969
+ dict(iter(df["C"].groupby([df["A"], df["B"]])))
970
+ dict(iter(df.groupby("A")["C"]))
971
+ dict(iter(df.groupby(["A", "B"])["C"]))
972
+
973
+ def test_groupby_with_small_elem(self):
974
+ # GH 8542
975
+ # length=2
976
+ df = DataFrame(
977
+ {"event": ["start", "start"], "change": [1234, 5678]},
978
+ index=pd.DatetimeIndex(["2014-09-10", "2013-10-10"]),
979
+ )
980
+ grouped = df.groupby([Grouper(freq="M"), "event"])
981
+ assert len(grouped.groups) == 2
982
+ assert grouped.ngroups == 2
983
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
984
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
985
+
986
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
987
+ tm.assert_frame_equal(res, df.iloc[[0], :])
988
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
989
+ tm.assert_frame_equal(res, df.iloc[[1], :])
990
+
991
+ df = DataFrame(
992
+ {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},
993
+ index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-09-15"]),
994
+ )
995
+ grouped = df.groupby([Grouper(freq="M"), "event"])
996
+ assert len(grouped.groups) == 2
997
+ assert grouped.ngroups == 2
998
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
999
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
1000
+
1001
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
1002
+ tm.assert_frame_equal(res, df.iloc[[0, 2], :])
1003
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
1004
+ tm.assert_frame_equal(res, df.iloc[[1], :])
1005
+
1006
+ # length=3
1007
+ df = DataFrame(
1008
+ {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},
1009
+ index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-08-05"]),
1010
+ )
1011
+ grouped = df.groupby([Grouper(freq="M"), "event"])
1012
+ assert len(grouped.groups) == 3
1013
+ assert grouped.ngroups == 3
1014
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
1015
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
1016
+ assert (Timestamp("2014-08-31"), "start") in grouped.groups
1017
+
1018
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
1019
+ tm.assert_frame_equal(res, df.iloc[[0], :])
1020
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
1021
+ tm.assert_frame_equal(res, df.iloc[[1], :])
1022
+ res = grouped.get_group((Timestamp("2014-08-31"), "start"))
1023
+ tm.assert_frame_equal(res, df.iloc[[2], :])
1024
+
1025
+ def test_grouping_string_repr(self):
1026
+ # GH 13394
1027
+ mi = MultiIndex.from_arrays([list("AAB"), list("aba")])
1028
+ df = DataFrame([[1, 2, 3]], columns=mi)
1029
+ gr = df.groupby(df[("A", "a")])
1030
+
1031
+ result = gr.grouper.groupings[0].__repr__()
1032
+ expected = "Grouping(('A', 'a'))"
1033
+ assert result == expected
1034
+
1035
+
1036
+ def test_grouping_by_key_is_in_axis():
1037
+ # GH#50413 - Groupers specified by key are in-axis
1038
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 1, 2], "c": [3, 4, 5]}).set_index("a")
1039
+ gb = df.groupby([Grouper(level="a"), Grouper(key="b")], as_index=False)
1040
+ assert not gb.grouper.groupings[0].in_axis
1041
+ assert gb.grouper.groupings[1].in_axis
1042
+
1043
+ # Currently only in-axis groupings are included in the result when as_index=False;
1044
+ # This is likely to change in the future.
1045
+ result = gb.sum()
1046
+ expected = DataFrame({"b": [1, 2], "c": [7, 5]})
1047
+ tm.assert_frame_equal(result, expected)
1048
+
1049
+
1050
+ def test_grouper_groups():
1051
+ # GH#51182 check Grouper.groups does not raise AttributeError
1052
+ df = DataFrame({"a": [1, 2, 3], "b": 1})
1053
+ grper = Grouper(key="a")
1054
+ gb = df.groupby(grper)
1055
+
1056
+ msg = "Use GroupBy.groups instead"
1057
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1058
+ res = grper.groups
1059
+ assert res is gb.groups
1060
+
1061
+ msg = "Use GroupBy.grouper instead"
1062
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1063
+ res = grper.grouper
1064
+ assert res is gb.grouper
1065
+
1066
+ msg = "Grouper.obj is deprecated and will be removed"
1067
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1068
+ res = grper.obj
1069
+ assert res is gb.obj
1070
+
1071
+ msg = "Use Resampler.ax instead"
1072
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1073
+ grper.ax
1074
+
1075
+ msg = "Grouper.indexer is deprecated"
1076
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1077
+ grper.indexer
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_index_as_string.py ADDED
@@ -0,0 +1,85 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ import pandas._testing as tm
6
+
7
+
8
+ @pytest.fixture(params=[["inner"], ["inner", "outer"]])
9
+ def frame(request):
10
+ levels = request.param
11
+ df = pd.DataFrame(
12
+ {
13
+ "outer": ["a", "a", "a", "b", "b", "b"],
14
+ "inner": [1, 2, 3, 1, 2, 3],
15
+ "A": np.arange(6),
16
+ "B": ["one", "one", "two", "two", "one", "one"],
17
+ }
18
+ )
19
+ if levels:
20
+ df = df.set_index(levels)
21
+
22
+ return df
23
+
24
+
25
+ @pytest.fixture()
26
+ def series():
27
+ df = pd.DataFrame(
28
+ {
29
+ "outer": ["a", "a", "a", "b", "b", "b"],
30
+ "inner": [1, 2, 3, 1, 2, 3],
31
+ "A": np.arange(6),
32
+ "B": ["one", "one", "two", "two", "one", "one"],
33
+ }
34
+ )
35
+ s = df.set_index(["outer", "inner", "B"])["A"]
36
+
37
+ return s
38
+
39
+
40
+ @pytest.mark.parametrize(
41
+ "key_strs,groupers",
42
+ [
43
+ ("inner", pd.Grouper(level="inner")), # Index name
44
+ (["inner"], [pd.Grouper(level="inner")]), # List of index name
45
+ (["B", "inner"], ["B", pd.Grouper(level="inner")]), # Column and index
46
+ (["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column
47
+ ],
48
+ )
49
+ def test_grouper_index_level_as_string(frame, key_strs, groupers):
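+    # a non-numeric column survives the groupby ("outer" when it was not set
+    # as an index level, or "B" when it is not one of the keys), so mean()
+    # must be restricted with numeric_only=True in those cases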
50
+ if "B" not in key_strs or "outer" in frame.columns:
51
+ result = frame.groupby(key_strs).mean(numeric_only=True)
52
+ expected = frame.groupby(groupers).mean(numeric_only=True)
53
+ else:
54
+ result = frame.groupby(key_strs).mean()
55
+ expected = frame.groupby(groupers).mean()
56
+ tm.assert_frame_equal(result, expected)
57
+
58
+
59
+ @pytest.mark.parametrize(
60
+ "levels",
61
+ [
62
+ "inner",
63
+ "outer",
64
+ "B",
65
+ ["inner"],
66
+ ["outer"],
67
+ ["B"],
68
+ ["inner", "outer"],
69
+ ["outer", "inner"],
70
+ ["inner", "outer", "B"],
71
+ ["B", "outer", "inner"],
72
+ ],
73
+ )
74
+ def test_grouper_index_level_as_string_series(series, levels):
75
+ # Compute expected result
76
+ if isinstance(levels, list):
77
+ groupers = [pd.Grouper(level=lv) for lv in levels]
78
+ else:
79
+ groupers = pd.Grouper(level=levels)
80
+
81
+ expected = series.groupby(groupers).mean()
82
+
83
+ # Compute and check result
84
+ result = series.groupby(levels).mean()
85
+ tm.assert_series_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_indexing.py ADDED
@@ -0,0 +1,332 @@
1
+ # Tests for GroupBy._positional_selector (positional grouped indexing), GH#42864
2
+
3
+ import random
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ import pandas._testing as tm
10
+
11
+
12
+ @pytest.mark.parametrize(
13
+ "arg, expected_rows",
14
+ [
15
+ [0, [0, 1, 4]],
16
+ [2, [5]],
17
+ [5, []],
18
+ [-1, [3, 4, 7]],
19
+ [-2, [1, 6]],
20
+ [-6, []],
21
+ ],
22
+ )
23
+ def test_int(slice_test_df, slice_test_grouped, arg, expected_rows):
24
+ # Test single integer
25
+ result = slice_test_grouped._positional_selector[arg]
26
+ expected = slice_test_df.iloc[expected_rows]
27
+
28
+ tm.assert_frame_equal(result, expected)
29
+
30
+
31
+ def test_slice(slice_test_df, slice_test_grouped):
32
+ # Test single slice
33
+ result = slice_test_grouped._positional_selector[0:3:2]
34
+ expected = slice_test_df.iloc[[0, 1, 4, 5]]
35
+
36
+ tm.assert_frame_equal(result, expected)
37
+
38
+
39
+ @pytest.mark.parametrize(
40
+ "arg, expected_rows",
41
+ [
42
+ [[0, 2], [0, 1, 4, 5]],
43
+ [[0, 2, -1], [0, 1, 3, 4, 5, 7]],
44
+ [range(0, 3, 2), [0, 1, 4, 5]],
45
+ [{0, 2}, [0, 1, 4, 5]],
46
+ ],
47
+ ids=[
48
+ "list",
49
+ "negative",
50
+ "range",
51
+ "set",
52
+ ],
53
+ )
54
+ def test_list(slice_test_df, slice_test_grouped, arg, expected_rows):
55
+ # Test lists of integers and integer valued iterables
56
+ result = slice_test_grouped._positional_selector[arg]
57
+ expected = slice_test_df.iloc[expected_rows]
58
+
59
+ tm.assert_frame_equal(result, expected)
60
+
61
+
62
+ def test_ints(slice_test_df, slice_test_grouped):
63
+ # Test tuple of ints
64
+ result = slice_test_grouped._positional_selector[0, 2, -1]
65
+ expected = slice_test_df.iloc[[0, 1, 3, 4, 5, 7]]
66
+
67
+ tm.assert_frame_equal(result, expected)
68
+
69
+
70
+ def test_slices(slice_test_df, slice_test_grouped):
71
+ # Test tuple of slices
72
+ result = slice_test_grouped._positional_selector[:2, -2:]
73
+ expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]
74
+
75
+ tm.assert_frame_equal(result, expected)
76
+
77
+
78
+ def test_mix(slice_test_df, slice_test_grouped):
79
+ # Test mixed tuple of ints and slices
80
+ result = slice_test_grouped._positional_selector[0, 1, -2:]
81
+ expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]
82
+
83
+ tm.assert_frame_equal(result, expected)
84
+
85
+
86
+ @pytest.mark.parametrize(
87
+ "arg, expected_rows",
88
+ [
89
+ [0, [0, 1, 4]],
90
+ [[0, 2, -1], [0, 1, 3, 4, 5, 7]],
91
+ [(slice(None, 2), slice(-2, None)), [0, 1, 2, 3, 4, 6, 7]],
92
+ ],
93
+ )
94
+ def test_as_index(slice_test_df, arg, expected_rows):
95
+ # Test the default as_index behaviour
96
+ result = slice_test_df.groupby("Group", sort=False)._positional_selector[arg]
97
+ expected = slice_test_df.iloc[expected_rows]
98
+
99
+ tm.assert_frame_equal(result, expected)
100
+
101
+
102
+ def test_doc_examples():
103
+ # Test the examples in the documentation
104
+ df = pd.DataFrame(
105
+ [["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], columns=["A", "B"]
106
+ )
107
+
108
+ grouped = df.groupby("A", as_index=False)
109
+
110
+ result = grouped._positional_selector[1:2]
111
+ expected = pd.DataFrame([["a", 2], ["b", 5]], columns=["A", "B"], index=[1, 4])
112
+
113
+ tm.assert_frame_equal(result, expected)
114
+
115
+ result = grouped._positional_selector[1, -1]
116
+ expected = pd.DataFrame(
117
+ [["a", 2], ["a", 3], ["b", 5]], columns=["A", "B"], index=[1, 2, 4]
118
+ )
119
+
120
+ tm.assert_frame_equal(result, expected)
121
+
122
+
123
+ @pytest.fixture()
124
+ def multiindex_data():
125
+ ndates = 100
126
+ nitems = 20
127
+ dates = pd.date_range("20130101", periods=ndates, freq="D")
128
+ items = [f"item {i}" for i in range(nitems)]
129
+
130
+ data = {}
131
+ for date in dates:
132
+ nitems_for_date = nitems - random.randint(0, 12)
133
+ levels = [
134
+ (item, random.randint(0, 10000) / 100, random.randint(0, 10000) / 100)
135
+ for item in items[:nitems_for_date]
136
+ ]
137
+ levels.sort(key=lambda x: x[1])
138
+ data[date] = levels
139
+
140
+ return data
141
+
142
+
143
+ def _make_df_from_data(data):
144
+ rows = {}
145
+ for date in data:
146
+ for level in data[date]:
147
+ rows[(date, level[0])] = {"A": level[1], "B": level[2]}
148
+
149
+ df = pd.DataFrame.from_dict(rows, orient="index")
150
+ df.index.names = ("Date", "Item")
151
+ return df
152
+
153
+
154
+ def test_multiindex(multiindex_data):
155
+ # Test the multiindex mentioned as the use-case in the documentation
156
+ df = _make_df_from_data(multiindex_data)
157
+ result = df.groupby("Date", as_index=False).nth(slice(3, -3))
158
+
159
+ sliced = {date: multiindex_data[date][3:-3] for date in multiindex_data}
160
+ expected = _make_df_from_data(sliced)
161
+
162
+ tm.assert_frame_equal(result, expected)
163
+
164
+
165
+ @pytest.mark.parametrize("arg", [1, 5, 30, 1000, -1, -5, -30, -1000])
166
+ @pytest.mark.parametrize("method", ["head", "tail"])
167
+ @pytest.mark.parametrize("simulated", [True, False])
168
+ def test_against_head_and_tail(arg, method, simulated):
169
+ # Test gives the same results as grouped head and tail
170
+ n_groups = 100
171
+ n_rows_per_group = 30
172
+
173
+ data = {
174
+ "group": [
175
+ f"group {g}" for j in range(n_rows_per_group) for g in range(n_groups)
176
+ ],
177
+ "value": [
178
+ f"group {g} row {j}"
179
+ for j in range(n_rows_per_group)
180
+ for g in range(n_groups)
181
+ ],
182
+ }
183
+ df = pd.DataFrame(data)
184
+ grouped = df.groupby("group", as_index=False)
185
+ size = arg if arg >= 0 else n_rows_per_group + arg
186
+
187
+ if method == "head":
188
+ result = grouped._positional_selector[:arg]
189
+
190
+ if simulated:
191
+ indices = []
192
+ for j in range(size):
193
+ for i in range(n_groups):
194
+ if j * n_groups + i < n_groups * n_rows_per_group:
195
+ indices.append(j * n_groups + i)
196
+
197
+ expected = df.iloc[indices]
198
+
199
+ else:
200
+ expected = grouped.head(arg)
201
+
202
+ else:
203
+ result = grouped._positional_selector[-arg:]
204
+
205
+ if simulated:
206
+ indices = []
207
+ for j in range(size):
208
+ for i in range(n_groups):
209
+ if (n_rows_per_group + j - size) * n_groups + i >= 0:
210
+ indices.append((n_rows_per_group + j - size) * n_groups + i)
211
+
212
+ expected = df.iloc[indices]
213
+
214
+ else:
215
+ expected = grouped.tail(arg)
216
+
217
+ tm.assert_frame_equal(result, expected)
218
+
219
+
220
+ @pytest.mark.parametrize("start", [None, 0, 1, 10, -1, -10])
221
+ @pytest.mark.parametrize("stop", [None, 0, 1, 10, -1, -10])
222
+ @pytest.mark.parametrize("step", [None, 1, 5])
223
+ def test_against_df_iloc(start, stop, step):
224
+ # Test that a single group gives the same results as DataFrame.iloc
225
+ n_rows = 30
226
+
227
+ data = {
228
+ "group": ["group 0"] * n_rows,
229
+ "value": list(range(n_rows)),
230
+ }
231
+ df = pd.DataFrame(data)
232
+ grouped = df.groupby("group", as_index=False)
233
+
234
+ result = grouped._positional_selector[start:stop:step]
235
+ expected = df.iloc[start:stop:step]
236
+
237
+ tm.assert_frame_equal(result, expected)
238
+
239
+
240
+ def test_series():
241
+ # Test grouped Series
242
+ ser = pd.Series([1, 2, 3, 4, 5], index=["a", "a", "a", "b", "b"])
243
+ grouped = ser.groupby(level=0)
244
+ result = grouped._positional_selector[1:2]
245
+ expected = pd.Series([2, 5], index=["a", "b"])
246
+
247
+ tm.assert_series_equal(result, expected)
248
+
249
+
250
+ @pytest.mark.parametrize("step", [1, 2, 3, 4, 5])
251
+ def test_step(step):
252
+ # Test slice with various step values
253
+ data = [["x", f"x{i}"] for i in range(5)]
254
+ data += [["y", f"y{i}"] for i in range(4)]
255
+ data += [["z", f"z{i}"] for i in range(3)]
256
+ df = pd.DataFrame(data, columns=["A", "B"])
257
+
258
+ grouped = df.groupby("A", as_index=False)
259
+
260
+ result = grouped._positional_selector[::step]
261
+
262
+ data = [["x", f"x{i}"] for i in range(0, 5, step)]
263
+ data += [["y", f"y{i}"] for i in range(0, 4, step)]
264
+ data += [["z", f"z{i}"] for i in range(0, 3, step)]
265
+
266
+ index = [0 + i for i in range(0, 5, step)]
267
+ index += [5 + i for i in range(0, 4, step)]
268
+ index += [9 + i for i in range(0, 3, step)]
269
+
270
+ expected = pd.DataFrame(data, columns=["A", "B"], index=index)
271
+
272
+ tm.assert_frame_equal(result, expected)
273
+
274
+
275
+ @pytest.fixture()
276
+ def column_group_df():
277
+ return pd.DataFrame(
278
+ [[0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 0, 1, 0, 2]],
279
+ columns=["A", "B", "C", "D", "E", "F", "G"],
280
+ )
281
+
282
+
283
+ def test_column_axis(column_group_df):
284
+ g = column_group_df.groupby(column_group_df.iloc[1], axis=1)
285
+ result = g._positional_selector[1:-1]
286
+ expected = column_group_df.iloc[:, [1, 3]]
287
+
288
+ tm.assert_frame_equal(result, expected)
289
+
290
+
291
+ def test_columns_on_iter():
292
+ # GitHub issue #44821
293
+ df = pd.DataFrame({k: range(10) for k in "ABC"})
294
+
295
+ # Group-by and select columns
296
+ cols = ["A", "B"]
297
+ for _, dg in df.groupby(df.A < 4)[cols]:
298
+ tm.assert_index_equal(dg.columns, pd.Index(cols))
299
+ assert "C" not in dg.columns
300
+
301
+
302
+ @pytest.mark.parametrize("func", [list, pd.Index, pd.Series, np.array])
303
+ def test_groupby_duplicated_columns(func):
304
+ # GH#44924
305
+ df = pd.DataFrame(
306
+ {
307
+ "A": [1, 2],
308
+ "B": [3, 3],
309
+ "C": ["G", "G"],
310
+ }
311
+ )
312
+ result = df.groupby("C")[func(["A", "B", "A"])].mean()
313
+ expected = pd.DataFrame(
314
+ [[1.5, 3.0, 1.5]], columns=["A", "B", "A"], index=pd.Index(["G"], name="C")
315
+ )
316
+ tm.assert_frame_equal(result, expected)
317
+
318
+
319
+ def test_groupby_get_nonexisting_groups():
320
+ # GH#32492
321
+ df = pd.DataFrame(
322
+ data={
323
+ "A": ["a1", "a2", None],
324
+ "B": ["b1", "b2", "b1"],
325
+ "val": [1, 2, 3],
326
+ }
327
+ )
328
+ grps = df.groupby(by=["A", "B"])
329
+
330
+ msg = "('a2', 'b1')"
331
+ with pytest.raises(KeyError, match=msg):
332
+ grps.get_group(("a2", "b1"))
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_libgroupby.py ADDED
@@ -0,0 +1,284 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import groupby as libgroupby
5
+ from pandas._libs.groupby import (
6
+ group_cumprod,
7
+ group_cumsum,
8
+ group_mean,
9
+ group_var,
10
+ )
11
+
12
+ from pandas.core.dtypes.common import ensure_platform_int
13
+
14
+ from pandas import isna
15
+ import pandas._testing as tm
16
+
17
+
18
+ class GroupVarTestMixin:
19
+ def test_group_var_generic_1d(self):
20
+ prng = np.random.RandomState(1234)
21
+
22
+ out = (np.nan * np.ones((5, 1))).astype(self.dtype)
23
+ counts = np.zeros(5, dtype="int64")
24
+ values = 10 * prng.rand(15, 1).astype(self.dtype)
25
+ labels = np.tile(np.arange(5), (3,)).astype("intp")
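+        # labels tile [0..4] three times, so group i holds values i, i+5 and
+        # i+10 -- exactly row i of the Fortran-order (5, 3) reshape used below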
26
+
27
+ expected_out = (
28
+ np.squeeze(values).reshape((5, 3), order="F").std(axis=1, ddof=1) ** 2
29
+ )[:, np.newaxis]
30
+ expected_counts = counts + 3
31
+
32
+ self.algo(out, counts, values, labels)
33
+ assert np.allclose(out, expected_out, self.rtol)
34
+ tm.assert_numpy_array_equal(counts, expected_counts)
35
+
36
+ def test_group_var_generic_1d_flat_labels(self):
37
+ prng = np.random.RandomState(1234)
38
+
39
+ out = (np.nan * np.ones((1, 1))).astype(self.dtype)
40
+ counts = np.zeros(1, dtype="int64")
41
+ values = 10 * prng.rand(5, 1).astype(self.dtype)
42
+ labels = np.zeros(5, dtype="intp")
43
+
44
+ expected_out = np.array([[values.std(ddof=1) ** 2]])
45
+ expected_counts = counts + 5
46
+
47
+ self.algo(out, counts, values, labels)
48
+
49
+ assert np.allclose(out, expected_out, self.rtol)
50
+ tm.assert_numpy_array_equal(counts, expected_counts)
51
+
52
+ def test_group_var_generic_2d_all_finite(self):
53
+ prng = np.random.RandomState(1234)
54
+
55
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
56
+ counts = np.zeros(5, dtype="int64")
57
+ values = 10 * prng.rand(10, 2).astype(self.dtype)
58
+ labels = np.tile(np.arange(5), (2,)).astype("intp")
59
+
60
+ expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
61
+ expected_counts = counts + 2
62
+
63
+ self.algo(out, counts, values, labels)
64
+ assert np.allclose(out, expected_out, self.rtol)
65
+ tm.assert_numpy_array_equal(counts, expected_counts)
66
+
67
+ def test_group_var_generic_2d_some_nan(self):
68
+ prng = np.random.RandomState(1234)
69
+
70
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
71
+ counts = np.zeros(5, dtype="int64")
72
+ values = 10 * prng.rand(10, 2).astype(self.dtype)
73
+ values[:, 1] = np.nan
74
+ labels = np.tile(np.arange(5), (2,)).astype("intp")
75
+
76
+ expected_out = np.vstack(
77
+ [
78
+ values[:, 0].reshape(5, 2, order="F").std(ddof=1, axis=1) ** 2,
79
+ np.nan * np.ones(5),
80
+ ]
81
+ ).T.astype(self.dtype)
82
+ expected_counts = counts + 2
83
+
84
+ self.algo(out, counts, values, labels)
85
+ tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
86
+ tm.assert_numpy_array_equal(counts, expected_counts)
87
+
88
+ def test_group_var_constant(self):
89
+ # Regression test from GH 10448.
90
+
91
+ out = np.array([[np.nan]], dtype=self.dtype)
92
+ counts = np.array([0], dtype="int64")
93
+ values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
94
+ labels = np.zeros(3, dtype="intp")
95
+
96
+ self.algo(out, counts, values, labels)
97
+
98
+ assert counts[0] == 3
99
+ assert out[0, 0] >= 0
100
+ tm.assert_almost_equal(out[0, 0], 0.0)
101
+
102
+
103
+ class TestGroupVarFloat64(GroupVarTestMixin):
104
+ __test__ = True
105
+
106
+ algo = staticmethod(group_var)
107
+ dtype = np.float64
108
+ rtol = 1e-5
109
+
110
+ def test_group_var_large_inputs(self):
111
+ prng = np.random.RandomState(1234)
112
+
113
+ out = np.array([[np.nan]], dtype=self.dtype)
114
+ counts = np.array([0], dtype="int64")
115
+ values = (prng.rand(10**6) + 10**12).astype(self.dtype)
116
+ values.shape = (10**6, 1)
117
+ labels = np.zeros(10**6, dtype="intp")
118
+
119
+ self.algo(out, counts, values, labels)
120
+
121
+ assert counts[0] == 10**6
122
+ tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
123
+
124
+
125
+ class TestGroupVarFloat32(GroupVarTestMixin):
126
+ __test__ = True
127
+
128
+ algo = staticmethod(group_var)
129
+ dtype = np.float32
130
+ rtol = 1e-2
131
+
132
+
133
+ @pytest.mark.parametrize("dtype", ["float32", "float64"])
134
+ def test_group_ohlc(dtype):
135
+ obj = np.array(np.random.randn(20), dtype=dtype)
136
+
137
+ bins = np.array([6, 12, 20])
138
+ out = np.zeros((3, 4), dtype)
139
+ counts = np.zeros(len(out), dtype=np.int64)
140
+ labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
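+    # the labels above expand the bin edges [6, 12, 20] into per-row group
+    # ids: np.diff(np.r_[0, bins]) == [6, 6, 8], i.e. [0]*6 + [1]*6 + [2]*8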
141
+
142
+ func = libgroupby.group_ohlc
143
+ func(out, counts, obj[:, None], labels)
144
+
145
+ def _ohlc(group):
146
+ if isna(group).all():
147
+ return np.repeat(np.nan, 4)
148
+ return [group[0], group.max(), group.min(), group[-1]]
149
+
150
+ expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
151
+
152
+ tm.assert_almost_equal(out, expected)
153
+ tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
154
+
155
+ obj[:6] = np.nan
156
+ func(out, counts, obj[:, None], labels)
157
+ expected[0] = np.nan
158
+ tm.assert_almost_equal(out, expected)
159
+
160
+
161
+ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
162
+ """
163
+ Check a group transform that executes a cumulative function.
164
+
165
+ Parameters
166
+ ----------
167
+ pd_op : callable
168
+ The pandas cumulative function.
169
+ np_op : callable
170
+ The analogous one in NumPy.
171
+ dtype : type
172
+ The specified dtype of the data.
173
+ """
174
+ is_datetimelike = False
175
+
176
+ data = np.array([[1], [2], [3], [4]], dtype=dtype)
177
+ answer = np.zeros_like(data)
178
+
179
+ labels = np.array([0, 0, 0, 0], dtype=np.intp)
180
+ ngroups = 1
181
+ pd_op(answer, data, labels, ngroups, is_datetimelike)
182
+
183
+ tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
184
+
185
+
186
+ @pytest.mark.parametrize("np_dtype", ["int64", "uint64", "float32", "float64"])
187
+ def test_cython_group_transform_cumsum(np_dtype):
188
+ # see gh-4095
189
+ dtype = np.dtype(np_dtype).type
190
+ pd_op, np_op = group_cumsum, np.cumsum
191
+ _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
192
+
193
+
194
+ def test_cython_group_transform_cumprod():
195
+ # see gh-4095
196
+ dtype = np.float64
197
+ pd_op, np_op = group_cumprod, np.cumprod
198
+ _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
199
+
200
+
201
+ def test_cython_group_transform_algos():
202
+ # see gh-4095
203
+ is_datetimelike = False
204
+
205
+ # with nans
206
+ labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
207
+ ngroups = 1
208
+
209
+ data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
210
+ actual = np.zeros_like(data)
211
+ actual.fill(np.nan)
212
+ group_cumprod(actual, data, labels, ngroups, is_datetimelike)
213
+ expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
214
+ tm.assert_numpy_array_equal(actual[:, 0], expected)
215
+
216
+ actual = np.zeros_like(data)
217
+ actual.fill(np.nan)
218
+ group_cumsum(actual, data, labels, ngroups, is_datetimelike)
219
+ expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")
220
+ tm.assert_numpy_array_equal(actual[:, 0], expected)
221
+
222
+ # timedelta
223
+ is_datetimelike = True
224
+ data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]
225
+ actual = np.zeros_like(data, dtype="int64")
226
+ group_cumsum(actual, data.view("int64"), labels, ngroups, is_datetimelike)
227
+ expected = np.array(
228
+ [
229
+ np.timedelta64(1, "ns"),
230
+ np.timedelta64(2, "ns"),
231
+ np.timedelta64(3, "ns"),
232
+ np.timedelta64(4, "ns"),
233
+ np.timedelta64(5, "ns"),
234
+ ]
235
+ )
236
+ tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)
237
+
238
+
239
+ def test_cython_group_mean_datetimelike():
240
+ actual = np.zeros(shape=(1, 1), dtype="float64")
241
+ counts = np.array([0], dtype="int64")
242
+ data = (
243
+ np.array(
244
+ [np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],
245
+ dtype="m8[ns]",
246
+ )[:, None]
247
+ .view("int64")
248
+ .astype("float64")
249
+ )
250
+ labels = np.zeros(len(data), dtype=np.intp)
251
+
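+    # with is_datetimelike=True the iNaT sentinel is treated as missing,
+    # so the mean of [2, 4, NaT] is 3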
252
+ group_mean(actual, counts, data, labels, is_datetimelike=True)
253
+
254
+ tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64"))
255
+
256
+
257
+ def test_cython_group_mean_wrong_min_count():
258
+ actual = np.zeros(shape=(1, 1), dtype="float64")
259
+ counts = np.zeros(1, dtype="int64")
260
+ data = np.zeros(1, dtype="float64")[:, None]
261
+ labels = np.zeros(1, dtype=np.intp)
262
+
263
+ with pytest.raises(AssertionError, match="min_count"):
264
+ group_mean(actual, counts, data, labels, is_datetimelike=True, min_count=0)
265
+
266
+
267
+ def test_cython_group_mean_not_datetimelike_but_has_NaT_values():
268
+ actual = np.zeros(shape=(1, 1), dtype="float64")
269
+ counts = np.array([0], dtype="int64")
270
+ data = (
271
+ np.array(
272
+ [np.timedelta64("NaT"), np.timedelta64("NaT")],
273
+ dtype="m8[ns]",
274
+ )[:, None]
275
+ .view("int64")
276
+ .astype("float64")
277
+ )
278
+ labels = np.zeros(len(data), dtype=np.intp)
279
+
280
+ group_mean(actual, counts, data, labels, is_datetimelike=False)
281
+
282
+ tm.assert_numpy_array_equal(
283
+ actual[:, 0], np.array(np.divide(np.add(data[0], data[1]), 2), dtype="float64")
284
+ )
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_min_max.py ADDED
@@ -0,0 +1,249 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs.tslibs import iNaT
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ Index,
10
+ Series,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ def test_max_min_non_numeric():
16
+ # #2700
17
+ aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
18
+
19
+ result = aa.groupby("nn").max()
20
+ assert "ss" in result
21
+
22
+ result = aa.groupby("nn").max(numeric_only=False)
23
+ assert "ss" in result
24
+
25
+ result = aa.groupby("nn").min()
26
+ assert "ss" in result
27
+
28
+ result = aa.groupby("nn").min(numeric_only=False)
29
+ assert "ss" in result
30
+
31
+
32
+ def test_max_min_object_multiple_columns(using_array_manager):
33
+ # GH#41111 case where the aggregation is valid for some columns but not
34
+ # others; we split object blocks column-wise, consistent with
35
+ # DataFrame._reduce
36
+
37
+ df = DataFrame(
38
+ {
39
+ "A": [1, 1, 2, 2, 3],
40
+ "B": [1, "foo", 2, "bar", False],
41
+ "C": ["a", "b", "c", "d", "e"],
42
+ }
43
+ )
44
+ df._consolidate_inplace()  # should already be consolidated, but double-check
45
+ if not using_array_manager:
46
+ assert len(df._mgr.blocks) == 2
47
+
48
+ gb = df.groupby("A")
49
+
50
+ result = gb[["C"]].max()
51
+ # "max" is valid for column "C" but not for "B"
52
+ ei = Index([1, 2, 3], name="A")
53
+ expected = DataFrame({"C": ["b", "d", "e"]}, index=ei)
54
+ tm.assert_frame_equal(result, expected)
55
+
56
+ result = gb[["C"]].min()
57
+ # "min" is valid for column "C" but not for "B"
58
+ ei = Index([1, 2, 3], name="A")
59
+ expected = DataFrame({"C": ["a", "c", "e"]}, index=ei)
60
+ tm.assert_frame_equal(result, expected)
61
+
62
+
63
+ def test_min_date_with_nans():
64
+ # GH26321
65
+ dates = pd.to_datetime(
66
+ Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
67
+ ).dt.date
68
+ df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
69
+
70
+ result = df.groupby("b", as_index=False)["c"].min()["c"]
71
+ expected = pd.to_datetime(
72
+ Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
73
+ ).dt.date
74
+ tm.assert_series_equal(result, expected)
75
+
76
+ result = df.groupby("b")["c"].min()
77
+ expected.index.name = "b"
78
+ tm.assert_series_equal(result, expected)
79
+
80
+
81
+ def test_max_inat():
82
+ # GH#40767 don't interpret iNaT as NaN
83
+ ser = Series([1, iNaT])
84
+ key = np.array([1, 1], dtype=np.int64)
85
+ gb = ser.groupby(key)
86
+
87
+ result = gb.max(min_count=2)
88
+ expected = Series({1: 1}, dtype=np.int64)
89
+ tm.assert_series_equal(result, expected, check_exact=True)
90
+
91
+ result = gb.min(min_count=2)
92
+ expected = Series({1: iNaT}, dtype=np.int64)
93
+ tm.assert_series_equal(result, expected, check_exact=True)
94
+
95
+ # not enough entries -> gets masked to NaN
96
+ result = gb.min(min_count=3)
97
+ expected = Series({1: np.nan})
98
+ tm.assert_series_equal(result, expected, check_exact=True)
99
+
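+ # Added sketch (not part of the original suite): iNaT is the int64 sentinel
+ # that datetime-like dtypes reserve for NaT, which is why the integer Series
+ # above must keep it as an ordinary value rather than treating it as missing.
+ def test_inat_is_int64_min():
+     assert iNaT == np.iinfo(np.int64).min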
100
+
101
+ def test_max_inat_not_all_na():
102
+ # GH#40767 dont interpret iNaT as NaN
103
+
104
+ # make sure we dont round iNaT+1 to iNaT
105
+ ser = Series([1, iNaT, 2, iNaT + 1])
106
+ gb = ser.groupby([1, 2, 3, 3])
107
+ result = gb.min(min_count=2)
108
+
109
+ # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy
110
+ expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1})
111
+ expected.index = expected.index.astype(np.int_)
112
+ tm.assert_series_equal(result, expected, check_exact=True)
113
+
114
+
115
+ @pytest.mark.parametrize("func", ["min", "max"])
116
+ def test_groupby_aggregate_period_column(func):
117
+ # GH 31471
118
+ groups = [1, 2]
119
+ periods = pd.period_range("2020", periods=2, freq="Y")
120
+ df = DataFrame({"a": groups, "b": periods})
121
+
122
+ result = getattr(df.groupby("a")["b"], func)()
123
+ idx = Index([1, 2], name="a")
124
+ expected = Series(periods, index=idx, name="b")
125
+
126
+ tm.assert_series_equal(result, expected)
127
+
128
+
129
+ @pytest.mark.parametrize("func", ["min", "max"])
130
+ def test_groupby_aggregate_period_frame(func):
131
+ # GH 31471
132
+ groups = [1, 2]
133
+ periods = pd.period_range("2020", periods=2, freq="Y")
134
+ df = DataFrame({"a": groups, "b": periods})
135
+
136
+ result = getattr(df.groupby("a"), func)()
137
+ idx = Index([1, 2], name="a")
138
+ expected = DataFrame({"b": periods}, index=idx)
139
+
140
+ tm.assert_frame_equal(result, expected)
141
+
142
+
143
+ def test_aggregate_numeric_object_dtype():
144
+ # https://github.com/pandas-dev/pandas/issues/39329
145
+ # simplified case: multiple object columns where one is all-NaN
146
+ # -> gets split as the all-NaN is inferred as float
147
+ df = DataFrame(
148
+ {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
149
+ ).astype(object)
150
+ result = df.groupby("key").min()
151
+ expected = (
152
+ DataFrame(
153
+ {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]},
154
+ )
155
+ .set_index("key")
156
+ .astype(object)
157
+ )
158
+ tm.assert_frame_equal(result, expected)
159
+
160
+ # same but with numbers
161
+ df = DataFrame(
162
+ {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
163
+ ).astype(object)
164
+ result = df.groupby("key").min()
165
+ expected = (
166
+ DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]})
167
+ .set_index("key")
168
+ .astype(object)
169
+ )
170
+ tm.assert_frame_equal(result, expected)
171
+
172
+
173
+ @pytest.mark.parametrize("func", ["min", "max"])
174
+ def test_aggregate_categorical_lost_index(func: str):
175
+ # GH 28641: groupby drops the index when grouping over a categorical column with min/max
176
+ ds = Series(["b"], dtype="category").cat.as_ordered()
177
+ df = DataFrame({"A": [1997], "B": ds})
178
+ result = df.groupby("A").agg({"B": func})
179
+ expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A"))
180
+
181
+ # ordered categorical dtype should be preserved
182
+ expected["B"] = expected["B"].astype(ds.dtype)
183
+
184
+ tm.assert_frame_equal(result, expected)
185
+
186
+
187
+ @pytest.mark.parametrize("dtype", ["Int64", "Int32", "Float64", "Float32", "boolean"])
188
+ def test_groupby_min_max_nullable(dtype):
189
+ if dtype == "Int64":
190
+ # GH#41743 avoid precision loss
191
+ ts = 1618556707013635762
192
+ elif dtype == "boolean":
193
+ ts = 0
194
+ else:
195
+ ts = 4.0
196
+
197
+ df = DataFrame({"id": [2, 2], "ts": [ts, ts + 1]})
198
+ df["ts"] = df["ts"].astype(dtype)
199
+
200
+ gb = df.groupby("id")
201
+
202
+ result = gb.min()
203
+ expected = df.iloc[:1].set_index("id")
204
+ tm.assert_frame_equal(result, expected)
205
+
206
+ res_max = gb.max()
207
+ expected_max = df.iloc[1:].set_index("id")
208
+ tm.assert_frame_equal(res_max, expected_max)
209
+
210
+ result2 = gb.min(min_count=3)
211
+ expected2 = DataFrame({"ts": [pd.NA]}, index=expected.index, dtype=dtype)
212
+ tm.assert_frame_equal(result2, expected2)
213
+
214
+ res_max2 = gb.max(min_count=3)
215
+ tm.assert_frame_equal(res_max2, expected2)
216
+
217
+ # Case with NA values
218
+ df2 = DataFrame({"id": [2, 2, 2], "ts": [ts, pd.NA, ts + 1]})
219
+ df2["ts"] = df2["ts"].astype(dtype)
220
+ gb2 = df2.groupby("id")
221
+
222
+ result3 = gb2.min()
223
+ tm.assert_frame_equal(result3, expected)
224
+
225
+ res_max3 = gb2.max()
226
+ tm.assert_frame_equal(res_max3, expected_max)
227
+
228
+ result4 = gb2.min(min_count=100)
229
+ tm.assert_frame_equal(result4, expected2)
230
+
231
+ res_max4 = gb2.max(min_count=100)
232
+ tm.assert_frame_equal(res_max4, expected2)
233
+
234
+
235
+ def test_min_max_nullable_uint64_empty_group():
236
+ # don't raise NotImplementedError from libgroupby
237
+ cat = pd.Categorical([0] * 10, categories=[0, 1])
238
+ df = DataFrame({"A": cat, "B": pd.array(np.arange(10, dtype=np.uint64))})
239
+ gb = df.groupby("A")
240
+
241
+ res = gb.min()
242
+
243
+ idx = pd.CategoricalIndex([0, 1], dtype=cat.dtype, name="A")
244
+ expected = DataFrame({"B": pd.array([0, pd.NA], dtype="UInt64")}, index=idx)
245
+ tm.assert_frame_equal(res, expected)
246
+
247
+ res = gb.max()
248
+ expected.iloc[0, 0] = 9
249
+ tm.assert_frame_equal(res, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_missing.py ADDED
@@ -0,0 +1,154 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ Index,
8
+ date_range,
9
+ )
10
+ import pandas._testing as tm
11
+
12
+
13
+ @pytest.mark.parametrize("func", ["ffill", "bfill"])
14
+ def test_groupby_column_index_name_lost_fill_funcs(func):
15
+ # GH: 29764 groupby loses index sometimes
16
+ df = DataFrame(
17
+ [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]],
18
+ columns=Index(["type", "a", "b"], name="idx"),
19
+ )
20
+ df_grouped = df.groupby(["type"])[["a", "b"]]
21
+ result = getattr(df_grouped, func)().columns
22
+ expected = Index(["a", "b"], name="idx")
23
+ tm.assert_index_equal(result, expected)
24
+
25
+
26
+ @pytest.mark.parametrize("func", ["ffill", "bfill"])
27
+ def test_groupby_fill_duplicate_column_names(func):
28
+ # GH: 25610 ValueError with duplicate column names
29
+ df1 = DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]})
30
+ df2 = DataFrame({"field1": [1, np.nan, 4]})
31
+ df_grouped = pd.concat([df1, df2], axis=1).groupby(by=["field2"])
32
+ expected = DataFrame(
33
+ [[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"]
34
+ )
35
+ result = getattr(df_grouped, func)()
36
+ tm.assert_frame_equal(result, expected)
37
+
38
+
39
+ def test_ffill_missing_arguments():
40
+ # GH 14955
41
+ df = DataFrame({"a": [1, 2], "b": [1, 1]})
42
+ with pytest.raises(ValueError, match="Must specify a fill"):
43
+ df.groupby("b").fillna()
44
+
45
+
46
+ @pytest.mark.parametrize(
47
+ "method, expected", [("ffill", [None, "a", "a"]), ("bfill", ["a", "a", None])]
48
+ )
49
+ def test_fillna_with_string_dtype(method, expected):
50
+ # GH 40250
51
+ df = DataFrame({"a": pd.array([None, "a", None], dtype="string"), "b": [0, 0, 0]})
52
+ grp = df.groupby("b")
53
+ result = grp.fillna(method=method)
54
+ expected = DataFrame({"a": pd.array(expected, dtype="string")})
55
+ tm.assert_frame_equal(result, expected)
56
+
57
+
58
+ def test_fill_consistency():
59
+ # GH9221
60
+ # pass thru keyword arguments to the generated wrapper
61
+ # are set if the passed kw is None (only)
62
+ df = DataFrame(
63
+ index=pd.MultiIndex.from_product(
64
+ [["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
65
+ ),
66
+ columns=Index(["1", "2"], name="id"),
67
+ )
68
+ df["1"] = [
69
+ np.nan,
70
+ 1,
71
+ np.nan,
72
+ np.nan,
73
+ 11,
74
+ np.nan,
75
+ np.nan,
76
+ 2,
77
+ np.nan,
78
+ np.nan,
79
+ 22,
80
+ np.nan,
81
+ ]
82
+ df["2"] = [
83
+ np.nan,
84
+ 3,
85
+ np.nan,
86
+ np.nan,
87
+ 33,
88
+ np.nan,
89
+ np.nan,
90
+ 4,
91
+ np.nan,
92
+ np.nan,
93
+ 44,
94
+ np.nan,
95
+ ]
96
+
97
+ expected = df.groupby(level=0, axis=0).fillna(method="ffill")
98
+ result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
99
+ tm.assert_frame_equal(result, expected)
100
+
101
+
102
+ @pytest.mark.parametrize("method", ["ffill", "bfill"])
103
+ @pytest.mark.parametrize("dropna", [True, False])
104
+ @pytest.mark.parametrize("has_nan_group", [True, False])
105
+ def test_ffill_handles_nan_groups(dropna, method, has_nan_group):
106
+ # GH 34725
107
+
108
+ df_without_nan_rows = DataFrame([(1, 0.1), (2, 0.2)])
109
+
110
+ ridx = [-1, 0, -1, -1, 1, -1]
111
+ df = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
112
+
113
+ group_b = np.nan if has_nan_group else "b"
114
+ df["group_col"] = pd.Series(["a"] * 3 + [group_b] * 3)
115
+
116
+ grouped = df.groupby(by="group_col", dropna=dropna)
117
+ result = getattr(grouped, method)(limit=None)
118
+
119
+ expected_rows = {
120
+ ("ffill", True, True): [-1, 0, 0, -1, -1, -1],
121
+ ("ffill", True, False): [-1, 0, 0, -1, 1, 1],
122
+ ("ffill", False, True): [-1, 0, 0, -1, 1, 1],
123
+ ("ffill", False, False): [-1, 0, 0, -1, 1, 1],
124
+ ("bfill", True, True): [0, 0, -1, -1, -1, -1],
125
+ ("bfill", True, False): [0, 0, -1, 1, 1, -1],
126
+ ("bfill", False, True): [0, 0, -1, 1, 1, -1],
127
+ ("bfill", False, False): [0, 0, -1, 1, 1, -1],
128
+ }
129
+
130
+ ridx = expected_rows.get((method, dropna, has_nan_group))
131
+ expected = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
132
+ # columns are a 'take' on df.columns, which are object dtype
133
+ expected.columns = expected.columns.astype(object)
134
+
135
+ tm.assert_frame_equal(result, expected)
136
+
137
+
138
+ @pytest.mark.parametrize("min_count, value", [(2, np.nan), (-1, 1.0)])
139
+ @pytest.mark.parametrize("func", ["first", "last", "max", "min"])
140
+ def test_min_count(func, min_count, value):
141
+ # GH#37821
142
+ df = DataFrame({"a": [1] * 3, "b": [1, np.nan, np.nan], "c": [np.nan] * 3})
143
+ result = getattr(df.groupby("a"), func)(min_count=min_count)
144
+ expected = DataFrame({"b": [value], "c": [np.nan]}, index=Index([1], name="a"))
145
+ tm.assert_frame_equal(result, expected)
146
+
147
+
148
+ def test_indices_with_missing():
149
+ # GH 9304
150
+ df = DataFrame({"a": [1, 1, np.nan], "b": [2, 3, 4], "c": [5, 6, 7]})
151
+ g = df.groupby(["a", "b"])
152
+ result = g.indices
153
+ expected = {(1.0, 2): np.array([0]), (1.0, 3): np.array([1])}
154
+ assert result == expected
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_nth.py ADDED
@@ -0,0 +1,824 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+    Series,
+    Timestamp,
+    isna,
+)
+import pandas._testing as tm
+
+
+def test_first_last_nth(df):
+    # tests for first / last / nth
+    grouped = df.groupby("A")
+    first = grouped.first()
+    expected = df.loc[[1, 0], ["B", "C", "D"]]
+    expected.index = Index(["bar", "foo"], name="A")
+    expected = expected.sort_index()
+    tm.assert_frame_equal(first, expected)
+
+    nth = grouped.nth(0)
+    expected = df.loc[[0, 1]]
+    tm.assert_frame_equal(nth, expected)
+
+    last = grouped.last()
+    expected = df.loc[[5, 7], ["B", "C", "D"]]
+    expected.index = Index(["bar", "foo"], name="A")
+    tm.assert_frame_equal(last, expected)
+
+    nth = grouped.nth(-1)
+    expected = df.iloc[[5, 7]]
+    tm.assert_frame_equal(nth, expected)
+
+    nth = grouped.nth(1)
+    expected = df.iloc[[2, 3]]
+    tm.assert_frame_equal(nth, expected)
+
+    # it works!
+    grouped["B"].first()
+    grouped["B"].last()
+    grouped["B"].nth(0)
+
+    df.loc[df["A"] == "foo", "B"] = np.nan
+    assert isna(grouped["B"].first()["foo"])
+    assert isna(grouped["B"].last()["foo"])
+    assert isna(grouped["B"].nth(0).iloc[0])
+
+    # v0.14.0 whatsnew
+    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+    g = df.groupby("A")
+    result = g.first()
+    expected = df.iloc[[1, 2]].set_index("A")
+    tm.assert_frame_equal(result, expected)
+
+    expected = df.iloc[[1, 2]]
+    result = g.nth(0, dropna="any")
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("method", ["first", "last"])
+def test_first_last_with_na_object(method, nulls_fixture):
+    # https://github.com/pandas-dev/pandas/issues/32123
+    groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a")
+    result = getattr(groups, method)()
+
+    if method == "first":
+        values = [1, 3]
+    else:
+        values = [2, 3]
+
+    values = np.array(values, dtype=result["b"].dtype)
+    idx = Index([1, 2], name="a")
+    expected = DataFrame({"b": values}, index=idx)
+
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("index", [0, -1])
+def test_nth_with_na_object(index, nulls_fixture):
+    # https://github.com/pandas-dev/pandas/issues/32123
+    df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]})
+    groups = df.groupby("a")
+    result = groups.nth(index)
+    expected = df.iloc[[0, 2]] if index == 0 else df.iloc[[1, 3]]
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("method", ["first", "last"])
+def test_first_last_with_None(method):
+    # https://github.com/pandas-dev/pandas/issues/32800
+    # None should be preserved as object dtype
+    df = DataFrame.from_dict({"id": ["a"], "value": [None]})
+    groups = df.groupby("id", as_index=False)
+    result = getattr(groups, method)()
+
+    tm.assert_frame_equal(result, df)
+
+
+@pytest.mark.parametrize("method", ["first", "last"])
+@pytest.mark.parametrize(
+    "df, expected",
+    [
+        (
+            DataFrame({"id": "a", "value": [None, "foo", np.nan]}),
+            DataFrame({"value": ["foo"]}, index=Index(["a"], name="id")),
+        ),
+        (
+            DataFrame({"id": "a", "value": [np.nan]}, dtype=object),
+            DataFrame({"value": [None]}, index=Index(["a"], name="id")),
+        ),
+    ],
+)
+def test_first_last_with_None_expanded(method, df, expected):
+    # GH 32800, 38286
+    result = getattr(df.groupby("id"), method)()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_first_last_nth_dtypes(df_mixed_floats):
+    df = df_mixed_floats.copy()
+    df["E"] = True
+    df["F"] = 1
+
+    # tests for first / last / nth
+    grouped = df.groupby("A")
+    first = grouped.first()
+    expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]]
+    expected.index = Index(["bar", "foo"], name="A")
+    expected = expected.sort_index()
+    tm.assert_frame_equal(first, expected)
+
+    last = grouped.last()
+    expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]]
+    expected.index = Index(["bar", "foo"], name="A")
+    expected = expected.sort_index()
+    tm.assert_frame_equal(last, expected)
+
+    nth = grouped.nth(1)
+    expected = df.iloc[[2, 3]]
+    tm.assert_frame_equal(nth, expected)
+
+    # GH 2763, first/last shifting dtypes
+    idx = list(range(10))
+    idx.append(9)
+    s = Series(data=range(11), index=idx, name="IntCol")
+    assert s.dtype == "int64"
+    f = s.groupby(level=0).first()
+    assert f.dtype == "int64"
+
+
+def test_first_last_nth_nan_dtype():
+    # GH 33591
+    df = DataFrame({"data": ["A"], "nans": Series([np.nan], dtype=object)})
+    grouped = df.groupby("data")
+
+    expected = df.set_index("data").nans
+    tm.assert_series_equal(grouped.nans.first(), expected)
+    tm.assert_series_equal(grouped.nans.last(), expected)
+
+    expected = df.nans
+    tm.assert_series_equal(grouped.nans.nth(-1), expected)
+    tm.assert_series_equal(grouped.nans.nth(0), expected)
+
+
+def test_first_strings_timestamps():
+    # GH 11244
+    test = DataFrame(
+        {
+            Timestamp("2012-01-01 00:00:00"): ["a", "b"],
+            Timestamp("2012-01-02 00:00:00"): ["c", "d"],
+            "name": ["e", "e"],
+            "aaaa": ["f", "g"],
+        }
+    )
+    result = test.groupby("name").first()
+    expected = DataFrame(
+        [["a", "c", "f"]],
+        columns=Index([Timestamp("2012-01-01"), Timestamp("2012-01-02"), "aaaa"]),
+        index=Index(["e"], name="name"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nth():
+    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+    g = df.groupby("A")
+
+    tm.assert_frame_equal(g.nth(0), df.iloc[[0, 2]])
+    tm.assert_frame_equal(g.nth(1), df.iloc[[1]])
+    tm.assert_frame_equal(g.nth(2), df.loc[[]])
+    tm.assert_frame_equal(g.nth(-1), df.iloc[[1, 2]])
+    tm.assert_frame_equal(g.nth(-2), df.iloc[[0]])
+    tm.assert_frame_equal(g.nth(-3), df.loc[[]])
+    tm.assert_series_equal(g.B.nth(0), df.B.iloc[[0, 2]])
+    tm.assert_series_equal(g.B.nth(1), df.B.iloc[[1]])
+    tm.assert_frame_equal(g[["B"]].nth(0), df[["B"]].iloc[[0, 2]])
+
+    tm.assert_frame_equal(g.nth(0, dropna="any"), df.iloc[[1, 2]])
+    tm.assert_frame_equal(g.nth(-1, dropna="any"), df.iloc[[1, 2]])
+
+    tm.assert_frame_equal(g.nth(7, dropna="any"), df.iloc[:0])
+    tm.assert_frame_equal(g.nth(2, dropna="any"), df.iloc[:0])
+
+    # out of bounds, regression from 0.13.1
+    # GH 6621
+    df = DataFrame(
+        {
+            "color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"},
+            "food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"},
+            "two": {
+                0: 1.5456590000000001,
+                1: -0.070345000000000005,
+                2: -2.4004539999999999,
+                3: 0.46206000000000003,
+                4: 0.52350799999999997,
+            },
+            "one": {
+                0: 0.56573799999999996,
+                1: -0.9742360000000001,
+                2: 1.033801,
+                3: -0.78543499999999999,
+                4: 0.70422799999999997,
+            },
+        }
+    ).set_index(["color", "food"])
+
+    result = df.groupby(level=0, as_index=False).nth(2)
+    expected = df.iloc[[-1]]
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(level=0, as_index=False).nth(3)
+    expected = df.loc[[]]
+    tm.assert_frame_equal(result, expected)
+
+    # GH 7559
+    # from the vbench
+    df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype="int64")
+    s = df[1]
+    g = df[0]
+    expected = s.groupby(g).first()
+    expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
+    tm.assert_series_equal(expected2, expected, check_names=False)
+    assert expected.name == 1
+    assert expected2.name == 1
+
+    # validate first
+    v = s[g == 1].iloc[0]
+    assert expected.iloc[0] == v
+    assert expected2.iloc[0] == v
+
+    with pytest.raises(ValueError, match="For a DataFrame"):
+        s.groupby(g, sort=False).nth(0, dropna=True)
+
+    # doc example
+    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+    g = df.groupby("A")
+    result = g.B.nth(0, dropna="all")
+    expected = df.B.iloc[[1, 2]]
+    tm.assert_series_equal(result, expected)
+
+    # test multiple nth values
+    df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"])
+    g = df.groupby("A")
+
+    tm.assert_frame_equal(g.nth(0), df.iloc[[0, 3]])
+    tm.assert_frame_equal(g.nth([0]), df.iloc[[0, 3]])
+    tm.assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]])
+    tm.assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]])
+    tm.assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]])
+    tm.assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]])
+    tm.assert_frame_equal(g.nth([2]), df.iloc[[2]])
+    tm.assert_frame_equal(g.nth([3, 4]), df.loc[[]])
+
+    business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B")
+    df = DataFrame(1, index=business_dates, columns=["a", "b"])
+    # get the first, fourth and last two business days for each month
+    key = [df.index.year, df.index.month]
+    result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
+    expected_dates = pd.to_datetime(
+        [
+            "2014/4/1",
+            "2014/4/4",
+            "2014/4/29",
+            "2014/4/30",
+            "2014/5/1",
+            "2014/5/6",
+            "2014/5/29",
+            "2014/5/30",
+            "2014/6/2",
+            "2014/6/5",
+            "2014/6/27",
+            "2014/6/30",
+        ]
+    )
+    expected = DataFrame(1, columns=["a", "b"], index=expected_dates)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nth_multi_grouper(three_group):
+    # PR 9090, related to issue 8979
+    # test nth on multiple groupers
+    grouped = three_group.groupby(["A", "B"])
+    result = grouped.nth(0)
+    expected = three_group.iloc[[0, 3, 4, 7]]
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data, expected_first, expected_last",
+    [
+        (
+            {
+                "id": ["A"],
+                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
+                "foo": [1],
+            },
+            {
+                "id": ["A"],
+                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
+                "foo": [1],
+            },
+            {
+                "id": ["A"],
+                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
+                "foo": [1],
+            },
+        ),
+        (
+            {
+                "id": ["A", "B", "A"],
+                "time": [
+                    Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
+                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
+                    Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
+                ],
+                "foo": [1, 2, 3],
+            },
+            {
+                "id": ["A", "B"],
+                "time": [
+                    Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
+                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
+                ],
+                "foo": [1, 2],
+            },
+            {
+                "id": ["A", "B"],
+                "time": [
+                    Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
+                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
+                ],
+                "foo": [3, 2],
+            },
+        ),
+    ],
+)
+def test_first_last_tz(data, expected_first, expected_last):
+    # GH15884
+    # Test that the timezone is retained when calling first
+    # or last on groupby with as_index=False
+
+    df = DataFrame(data)
+
+    result = df.groupby("id", as_index=False).first()
+    expected = DataFrame(expected_first)
+    cols = ["id", "time", "foo"]
+    tm.assert_frame_equal(result[cols], expected[cols])
+
+    result = df.groupby("id", as_index=False)["time"].first()
+    tm.assert_frame_equal(result, expected[["id", "time"]])
+
+    result = df.groupby("id", as_index=False).last()
+    expected = DataFrame(expected_last)
+    cols = ["id", "time", "foo"]
+    tm.assert_frame_equal(result[cols], expected[cols])
+
+    result = df.groupby("id", as_index=False)["time"].last()
+    tm.assert_frame_equal(result, expected[["id", "time"]])
+
+
+@pytest.mark.parametrize(
+    "method, ts, alpha",
+    [
+        ["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"],
+        ["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"],
+    ],
+)
+def test_first_last_tz_multi_column(method, ts, alpha):
+    # GH 21603
+    category_string = Series(list("abc")).astype("category")
+    df = DataFrame(
+        {
+            "group": [1, 1, 2],
+            "category_string": category_string,
+            "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+        }
+    )
+    result = getattr(df.groupby("group"), method)()
+    expected = DataFrame(
+        {
+            "category_string": pd.Categorical(
+                [alpha, "c"], dtype=category_string.dtype
+            ),
+            "datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")],
+        },
+        index=Index([1, 2], name="group"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "values",
+    [
+        pd.array([True, False], dtype="boolean"),
+        pd.array([1, 2], dtype="Int64"),
+        pd.to_datetime(["2020-01-01", "2020-02-01"]),
+        pd.to_timedelta([1, 2], unit="D"),
+    ],
+)
+@pytest.mark.parametrize("function", ["first", "last", "min", "max"])
+def test_first_last_extension_array_keeps_dtype(values, function):
+    # https://github.com/pandas-dev/pandas/issues/33071
+    # https://github.com/pandas-dev/pandas/issues/32194
+    df = DataFrame({"a": [1, 2], "b": values})
+    grouped = df.groupby("a")
+    idx = Index([1, 2], name="a")
+    expected_series = Series(values, name="b", index=idx)
+    expected_frame = DataFrame({"b": values}, index=idx)
+
+    result_series = getattr(grouped["b"], function)()
+    tm.assert_series_equal(result_series, expected_series)
+
+    result_frame = grouped.agg({"b": function})
+    tm.assert_frame_equal(result_frame, expected_frame)
+
+
+def test_nth_multi_index_as_expected():
+    # PR 9090, related to issue 8979
+    # test nth on MultiIndex
+    three_group = DataFrame(
+        {
+            "A": [
+                "foo",
+                "foo",
+                "foo",
+                "foo",
+                "bar",
+                "bar",
+                "bar",
+                "bar",
+                "foo",
+                "foo",
+                "foo",
+            ],
+            "B": [
+                "one",
+                "one",
+                "one",
+                "two",
+                "one",
+                "one",
+                "one",
+                "two",
+                "two",
+                "two",
+                "one",
+            ],
+            "C": [
+                "dull",
+                "dull",
+                "shiny",
+                "dull",
+                "dull",
+                "shiny",
+                "shiny",
+                "dull",
+                "shiny",
+                "shiny",
+                "shiny",
+            ],
+        }
+    )
+    grouped = three_group.groupby(["A", "B"])
+    result = grouped.nth(0)
+    expected = three_group.iloc[[0, 3, 4, 7]]
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op, n, expected_rows",
+    [
+        ("head", -1, [0]),
+        ("head", 0, []),
+        ("head", 1, [0, 2]),
+        ("head", 7, [0, 1, 2]),
+        ("tail", -1, [1]),
+        ("tail", 0, []),
+        ("tail", 1, [1, 2]),
+        ("tail", 7, [0, 1, 2]),
+    ],
+)
+@pytest.mark.parametrize("columns", [None, [], ["A"], ["B"], ["A", "B"]])
+@pytest.mark.parametrize("as_index", [True, False])
+def test_groupby_head_tail(op, n, expected_rows, columns, as_index):
+    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
+    g = df.groupby("A", as_index=as_index)
+    expected = df.iloc[expected_rows]
+    if columns is not None:
+        g = g[columns]
+        expected = expected[columns]
+    result = getattr(g, op)(n)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op, n, expected_cols",
+    [
+        ("head", -1, [0]),
+        ("head", 0, []),
+        ("head", 1, [0, 2]),
+        ("head", 7, [0, 1, 2]),
+        ("tail", -1, [1]),
+        ("tail", 0, []),
+        ("tail", 1, [1, 2]),
+        ("tail", 7, [0, 1, 2]),
+    ],
+)
+def test_groupby_head_tail_axis_1(op, n, expected_cols):
+    # GH 9772
+    df = DataFrame(
+        [[1, 2, 3], [1, 4, 5], [2, 6, 7], [3, 8, 9]], columns=["A", "B", "C"]
+    )
+    g = df.groupby([0, 0, 1], axis=1)
+    expected = df.iloc[:, expected_cols]
+    result = getattr(g, op)(n)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_group_selection_cache():
+    # GH 12839 nth, head, and tail should return same result consistently
+    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
+    expected = df.iloc[[0, 2]]
+
+    g = df.groupby("A")
+    result1 = g.head(n=2)
+    result2 = g.nth(0)
+    tm.assert_frame_equal(result1, df)
+    tm.assert_frame_equal(result2, expected)
+
+    g = df.groupby("A")
+    result1 = g.tail(n=2)
+    result2 = g.nth(0)
+    tm.assert_frame_equal(result1, df)
+    tm.assert_frame_equal(result2, expected)
+
+    g = df.groupby("A")
+    result1 = g.nth(0)
+    result2 = g.head(n=2)
+    tm.assert_frame_equal(result1, expected)
+    tm.assert_frame_equal(result2, df)
+
+    g = df.groupby("A")
+    result1 = g.nth(0)
+    result2 = g.tail(n=2)
+    tm.assert_frame_equal(result1, expected)
+    tm.assert_frame_equal(result2, df)
+
+
+def test_nth_empty():
+    # GH 16064
+    df = DataFrame(index=[0], columns=["a", "b", "c"])
+    result = df.groupby("a").nth(10)
+    expected = df.iloc[:0]
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(["a", "b"]).nth(10)
+    expected = df.iloc[:0]
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nth_column_order():
+    # GH 20760
+    # Check that nth preserves column order
+    df = DataFrame(
+        [[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]],
+        columns=["A", "C", "B"],
+    )
+    result = df.groupby("A").nth(0)
+    expected = df.iloc[[0, 3]]
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby("A").nth(-1, dropna="any")
+    expected = df.iloc[[1, 4]]
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dropna", [None, "any", "all"])
+def test_nth_nan_in_grouper(dropna):
+    # GH 26011
+    df = DataFrame(
+        {
+            "a": [np.nan, "a", np.nan, "b", np.nan],
+            "b": [0, 2, 4, 6, 8],
+            "c": [1, 3, 5, 7, 9],
+        }
+    )
+    result = df.groupby("a").nth(0, dropna=dropna)
+    expected = df.iloc[[1, 3]]
+
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dropna", [None, "any", "all"])
+def test_nth_nan_in_grouper_series(dropna):
+    # GH 26454
+    df = DataFrame(
+        {
+            "a": [np.nan, "a", np.nan, "b", np.nan],
+            "b": [0, 2, 4, 6, 8],
+        }
+    )
+    result = df.groupby("a")["b"].nth(0, dropna=dropna)
+    expected = df["b"].iloc[[1, 3]]
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_first_categorical_and_datetime_data_nat():
+    # GH 20520
+    df = DataFrame(
+        {
+            "group": ["first", "first", "second", "third", "third"],
+            "time": 5 * [np.datetime64("NaT")],
+            "categories": Series(["a", "b", "c", "a", "b"], dtype="category"),
+        }
+    )
+    result = df.groupby("group").first()
+    expected = DataFrame(
+        {
+            "time": 3 * [np.datetime64("NaT")],
+            "categories": Series(["a", "c", "a"]).astype(
+                pd.CategoricalDtype(["a", "b", "c"])
+            ),
+        }
+    )
+    expected.index = Index(["first", "second", "third"], name="group")
+    tm.assert_frame_equal(result, expected)
+
+
+def test_first_multi_key_groupby_categorical():
+    # GH 22512
+    df = DataFrame(
+        {
+            "A": [1, 1, 1, 2, 2],
+            "B": [100, 100, 200, 100, 100],
+            "C": ["apple", "orange", "mango", "mango", "orange"],
+            "D": ["jupiter", "mercury", "mars", "venus", "venus"],
+        }
+    )
+    df = df.astype({"D": "category"})
+    result = df.groupby(by=["A", "B"]).first()
+    expected = DataFrame(
+        {
+            "C": ["apple", "mango", "mango"],
+            "D": Series(["jupiter", "mars", "venus"]).astype(
+                pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"])
+            ),
+        }
+    )
+    expected.index = MultiIndex.from_tuples(
+        [(1, 100), (1, 200), (2, 100)], names=["A", "B"]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("method", ["first", "last", "nth"])
+def test_groupby_last_first_nth_with_none(method, nulls_fixture):
+    # GH29645
+    expected = Series(["y"])
+    data = Series(
+        [nulls_fixture, nulls_fixture, nulls_fixture, "y", nulls_fixture],
+        index=[0, 0, 0, 0, 0],
+    ).groupby(level=0)
+
+    if method == "nth":
+        result = getattr(data, method)(3)
+    else:
+        result = getattr(data, method)()
+
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "arg, expected_rows",
+    [
+        [slice(None, 3, 2), [0, 1, 4, 5]],
+        [slice(None, -2), [0, 2, 5]],
+        [[slice(None, 2), slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]],
+        [[0, 1, slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]],
+    ],
+)
+def test_slice(slice_test_df, slice_test_grouped, arg, expected_rows):
+    # Test slices GH #42947
+
+    result = slice_test_grouped.nth[arg]
+    equivalent = slice_test_grouped.nth(arg)
+    expected = slice_test_df.iloc[expected_rows]
+
+    tm.assert_frame_equal(result, expected)
+    tm.assert_frame_equal(equivalent, expected)
+
+
+def test_nth_indexed(slice_test_df, slice_test_grouped):
+    # Test index notation GH #44688
+
+    result = slice_test_grouped.nth[0, 1, -2:]
+    equivalent = slice_test_grouped.nth([0, 1, slice(-2, None)])
+    expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]
+
+    tm.assert_frame_equal(result, expected)
+    tm.assert_frame_equal(equivalent, expected)
+
+
+def test_invalid_argument(slice_test_grouped):
+    # Test for error on invalid argument
+
+    with pytest.raises(TypeError, match="Invalid index"):
+        slice_test_grouped.nth(3.14)
+
+
+def test_negative_step(slice_test_grouped):
+    # Test for error on negative slice step
+
+    with pytest.raises(ValueError, match="Invalid step"):
+        slice_test_grouped.nth(slice(None, None, -1))
+
+
+def test_np_ints(slice_test_df, slice_test_grouped):
+    # Test np ints work
+
+    result = slice_test_grouped.nth(np.array([0, 1]))
+    expected = slice_test_df.iloc[[0, 1, 2, 3, 4]]
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_nth_with_column_axis():
+    # GH43926
+    df = DataFrame(
+        [
+            [4, 5, 6],
+            [8, 8, 7],
+        ],
+        index=["z", "y"],
+        columns=["C", "B", "A"],
+    )
+    result = df.groupby(df.iloc[1], axis=1).nth(0)
+    expected = df.iloc[:, [0, 2]]
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "start, stop, expected_values, expected_columns",
+    [
+        (None, None, [0, 1, 2, 3, 4], list("ABCDE")),
+        (None, 1, [0, 3], list("AD")),
+        (None, 9, [0, 1, 2, 3, 4], list("ABCDE")),
+        (None, -1, [0, 1, 3], list("ABD")),
+        (1, None, [1, 2, 4], list("BCE")),
+        (1, -1, [1], list("B")),
+        (-1, None, [2, 4], list("CE")),
+        (-1, 2, [4], list("E")),
+    ],
+)
+@pytest.mark.parametrize("method", ["call", "index"])
+def test_nth_slices_with_column_axis(
+    start, stop, expected_values, expected_columns, method
+):
+    df = DataFrame([range(5)], columns=[list("ABCDE")])
+    gb = df.groupby([5, 5, 5, 6, 6], axis=1)
+    result = {
+        "call": lambda start, stop: gb.nth(slice(start, stop)),
+        "index": lambda start, stop: gb.nth[start:stop],
+    }[method](start, stop)
+    expected = DataFrame([expected_values], columns=[expected_columns])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.filterwarnings(
+    "ignore:invalid value encountered in remainder:RuntimeWarning"
+)
+def test_head_tail_dropna_true():
+    # GH#45089
+    df = DataFrame(
+        [["a", "z"], ["b", np.nan], ["c", np.nan], ["c", np.nan]], columns=["X", "Y"]
+    )
+    expected = DataFrame([["a", "z"]], columns=["X", "Y"])
+
+    result = df.groupby(["X", "Y"]).head(n=1)
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(["X", "Y"]).tail(n=1)
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(["X", "Y"]).nth(n=0)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_head_tail_dropna_false():
+    # GH#45089
+    df = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"])
+    expected = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"])
+
+    result = df.groupby(["X", "Y"], dropna=False).head(n=1)
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(["X", "Y"], dropna=False).tail(n=1)
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(["X", "Y"], dropna=False).nth(n=0)
+    tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_numba.py ADDED
@@ -0,0 +1,73 @@
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import (
+    DataFrame,
+    Series,
+)
+import pandas._testing as tm
+
+
+@td.skip_if_no("numba")
+@pytest.mark.filterwarnings("ignore")
+# Filter warnings when parallel=True and the function can't be parallelized by Numba
+class TestEngine:
+    def test_cython_vs_numba_frame(
+        self, sort, nogil, parallel, nopython, numba_supported_reductions
+    ):
+        func, kwargs = numba_supported_reductions
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+        gb = df.groupby("a", sort=sort)
+        result = getattr(gb, func)(
+            engine="numba", engine_kwargs=engine_kwargs, **kwargs
+        )
+        expected = getattr(gb, func)(**kwargs)
+        # check_dtype can be removed if GH 44952 is addressed
+        check_dtype = func not in ("sum", "min", "max")
+        tm.assert_frame_equal(result, expected, check_dtype=check_dtype)
+
+    def test_cython_vs_numba_getitem(
+        self, sort, nogil, parallel, nopython, numba_supported_reductions
+    ):
+        func, kwargs = numba_supported_reductions
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+        gb = df.groupby("a", sort=sort)["c"]
+        result = getattr(gb, func)(
+            engine="numba", engine_kwargs=engine_kwargs, **kwargs
+        )
+        expected = getattr(gb, func)(**kwargs)
+        # check_dtype can be removed if GH 44952 is addressed
+        check_dtype = func not in ("sum", "min", "max")
+        tm.assert_series_equal(result, expected, check_dtype=check_dtype)
+
+    def test_cython_vs_numba_series(
+        self, sort, nogil, parallel, nopython, numba_supported_reductions
+    ):
+        func, kwargs = numba_supported_reductions
+        ser = Series(range(3), index=[1, 2, 1], name="foo")
+        engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+        gb = ser.groupby(level=0, sort=sort)
+        result = getattr(gb, func)(
+            engine="numba", engine_kwargs=engine_kwargs, **kwargs
+        )
+        expected = getattr(gb, func)(**kwargs)
+        # check_dtype can be removed if GH 44952 is addressed
+        check_dtype = func not in ("sum", "min", "max")
+        tm.assert_series_equal(result, expected, check_dtype=check_dtype)
+
+    def test_as_index_false_unsupported(self, numba_supported_reductions):
+        func, kwargs = numba_supported_reductions
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        gb = df.groupby("a", as_index=False)
+        with pytest.raises(NotImplementedError, match="as_index=False"):
+            getattr(gb, func)(engine="numba", **kwargs)
+
+    def test_axis_1_unsupported(self, numba_supported_reductions):
+        func, kwargs = numba_supported_reductions
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        gb = df.groupby("a", axis=1)
+        with pytest.raises(NotImplementedError, match="axis=1"):
+            getattr(gb, func)(engine="numba", **kwargs)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_nunique.py ADDED
@@ -0,0 +1,197 @@
+import datetime as dt
+from string import ascii_lowercase
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    NaT,
+    Series,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("n", 10 ** np.arange(2, 6))
+@pytest.mark.parametrize("m", [10, 100, 1000])
+@pytest.mark.parametrize("sort", [False, True])
+@pytest.mark.parametrize("dropna", [False, True])
+def test_series_groupby_nunique(n, m, sort, dropna):
+    def check_nunique(df, keys, as_index=True):
+        original_df = df.copy()
+        gr = df.groupby(keys, as_index=as_index, sort=sort)
+        left = gr["julie"].nunique(dropna=dropna)
+
+        gr = df.groupby(keys, as_index=as_index, sort=sort)
+        right = gr["julie"].apply(Series.nunique, dropna=dropna)
+        if not as_index:
+            right = right.reset_index(drop=True)
+
+        if as_index:
+            tm.assert_series_equal(left, right, check_names=False)
+        else:
+            tm.assert_frame_equal(left, right, check_names=False)
+        tm.assert_frame_equal(df, original_df)
+
+    days = date_range("2015-08-23", periods=10)
+
+    frame = DataFrame(
+        {
+            "jim": np.random.choice(list(ascii_lowercase), n),
+            "joe": np.random.choice(days, n),
+            "julie": np.random.randint(0, m, n),
+        }
+    )
+
+    check_nunique(frame, ["jim"])
+    check_nunique(frame, ["jim", "joe"])
+
+    frame.loc[1::17, "jim"] = None
+    frame.loc[3::37, "joe"] = None
+    frame.loc[7::19, "julie"] = None
+    frame.loc[8::19, "julie"] = None
+    frame.loc[9::19, "julie"] = None
+
+    check_nunique(frame, ["jim"])
+    check_nunique(frame, ["jim", "joe"])
+    check_nunique(frame, ["jim"], as_index=False)
+    check_nunique(frame, ["jim", "joe"], as_index=False)
+
+
+def test_nunique():
+    df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")})
+
+    expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]})
+    result = df.groupby("A", as_index=False).nunique()
+    tm.assert_frame_equal(result, expected)
+
+    # as_index
+    expected.index = list("abc")
+    expected.index.name = "A"
+    expected = expected.drop(columns="A")
+    result = df.groupby("A").nunique()
+    tm.assert_frame_equal(result, expected)
+
+    # with na
+    result = df.replace({"x": None}).groupby("A").nunique(dropna=False)
+    tm.assert_frame_equal(result, expected)
+
+    # dropna
+    expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc"))
+    expected.index.name = "A"
+    result = df.replace({"x": None}).groupby("A").nunique()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nunique_with_object():
+    # GH 11077
+    data = DataFrame(
+        [
+            [100, 1, "Alice"],
+            [200, 2, "Bob"],
+            [300, 3, "Charlie"],
+            [-400, 4, "Dan"],
+            [500, 5, "Edith"],
+        ],
+        columns=["amount", "id", "name"],
+    )
+
+    result = data.groupby(["id", "amount"])["name"].nunique()
+    index = MultiIndex.from_arrays([data.id, data.amount])
+    expected = Series([1] * 5, name="name", index=index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_nunique_with_empty_series():
+    # GH 12553
+    data = Series(name="name", dtype=object)
+    result = data.groupby(level=0).nunique()
+    expected = Series(name="name", dtype="int64")
+    tm.assert_series_equal(result, expected)
+
+
+def test_nunique_with_timegrouper():
+    # GH 13453
+    test = DataFrame(
+        {
+            "time": [
+                Timestamp("2016-06-28 09:35:35"),
+                Timestamp("2016-06-28 16:09:30"),
+                Timestamp("2016-06-28 16:46:28"),
+            ],
+            "data": ["1", "2", "3"],
+        }
+    ).set_index("time")
+    result = test.groupby(pd.Grouper(freq="h"))["data"].nunique()
+    expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "key, data, dropna, expected",
+    [
+        (
+            ["x", "x", "x"],
+            [Timestamp("2019-01-01"), NaT, Timestamp("2019-01-01")],
+            True,
+            Series([1], index=pd.Index(["x"], name="key"), name="data"),
+        ),
+        (
+            ["x", "x", "x"],
+            [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)],
+            True,
+            Series([1], index=pd.Index(["x"], name="key"), name="data"),
+        ),
+        (
+            ["x", "x", "x", "y", "y"],
+            [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)],
+            False,
+            Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"),
+        ),
+        (
+            ["x", "x", "x", "x", "y"],
+            [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)],
+            False,
+            Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"),
+        ),
+    ],
+)
+def test_nunique_with_NaT(key, data, dropna, expected):
+    # GH 27951
+    df = DataFrame({"key": key, "data": data})
+    result = df.groupby(["key"])["data"].nunique(dropna=dropna)
+    tm.assert_series_equal(result, expected)
+
+
+def test_nunique_preserves_column_level_names():
+    # GH 23222
+    test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0"))
+    result = test.groupby([0, 0, 0]).nunique()
+    expected = DataFrame([2], index=np.array([0]), columns=test.columns)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nunique_transform_with_datetime():
+    # GH 35109 - transform with nunique on datetimes results in integers
+    df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"])
+    result = df.groupby([0, 0, 1])["date"].transform("nunique")
+    expected = Series([2, 2, 1], name="date")
+    tm.assert_series_equal(result, expected)
+
+
+def test_empty_categorical(observed):
+    # GH#21334
+    cat = Series([1]).astype("category")
+    ser = cat[:0]
+    gb = ser.groupby(ser, observed=observed)
+    result = gb.nunique()
+    if observed:
+        expected = Series([], index=cat[:0], dtype="int64")
+    else:
+        expected = Series([0], index=cat, dtype="int64")
+    tm.assert_series_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_pipe.py ADDED
@@ -0,0 +1,80 @@
+import numpy as np
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+)
+import pandas._testing as tm
+
+
+def test_pipe():
+    # Test the pipe method of DataFrameGroupBy.
+    # Issue #17871
+
+    random_state = np.random.RandomState(1234567890)
+
+    df = DataFrame(
+        {
+            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+            "B": random_state.randn(8),
+            "C": random_state.randn(8),
+        }
+    )
+
+    def f(dfgb):
+        return dfgb.B.max() - dfgb.C.min().min()
+
+    def square(srs):
+        return srs**2
+
+    # Note that the transformations are
+    # GroupBy -> Series
+    # Series -> Series
+    # This then chains the GroupBy.pipe and the
+    # NDFrame.pipe methods
+    result = df.groupby("A").pipe(f).pipe(square)
+
+    index = Index(["bar", "foo"], dtype="object", name="A")
+    expected = pd.Series([8.99110003361, 8.17516964785], name="B", index=index)
+
+    tm.assert_series_equal(expected, result)
+
+
+def test_pipe_args():
+    # Test passing args to the pipe method of DataFrameGroupBy.
+    # Issue #17871
+
+    df = DataFrame(
+        {
+            "group": ["A", "A", "B", "B", "C"],
+            "x": [1.0, 2.0, 3.0, 2.0, 5.0],
+            "y": [10.0, 100.0, 1000.0, -100.0, -1000.0],
+        }
+    )
+
+    def f(dfgb, arg1):
+        filtered = dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
+        return filtered.groupby("group")
+
+    def g(dfgb, arg2):
+        return dfgb.sum() / dfgb.sum().sum() + arg2
+
+    def h(df, arg3):
+        return df.x + df.y - arg3
+
+    result = df.groupby("group").pipe(f, 0).pipe(g, 10).pipe(h, 100)
+
+    # Assert the results here
+    index = Index(["A", "B"], name="group")
+    expected = pd.Series([-79.5160891089, -78.4839108911], index=index)
+
+    tm.assert_series_equal(result, expected)
+
+    # test SeriesGroupby.pipe
+    ser = pd.Series([1, 1, 2, 2, 3, 3])
+    result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
+
+    expected = pd.Series([4, 8, 12], index=Index([1, 2, 3], dtype=np.int64))
+
+    tm.assert_series_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_quantile.py ADDED
@@ -0,0 +1,470 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+)
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+    "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
+)
+@pytest.mark.parametrize(
+    "a_vals,b_vals",
+    [
+        # Ints
+        ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]),
+        ([1, 2, 3, 4], [4, 3, 2, 1]),
+        ([1, 2, 3, 4, 5], [4, 3, 2, 1]),
+        # Floats
+        ([1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]),
+        # Missing data
+        ([1.0, np.nan, 3.0, np.nan, 5.0], [5.0, np.nan, 3.0, np.nan, 1.0]),
+        ([np.nan, 4.0, np.nan, 2.0, np.nan], [np.nan, 4.0, np.nan, 2.0, np.nan]),
+        # Timestamps
+        (
+            pd.date_range("1/1/18", freq="D", periods=5),
+            pd.date_range("1/1/18", freq="D", periods=5)[::-1],
+        ),
+        (
+            pd.date_range("1/1/18", freq="D", periods=5).as_unit("s"),
+            pd.date_range("1/1/18", freq="D", periods=5)[::-1].as_unit("s"),
+        ),
+        # All NA
+        ([np.nan] * 5, [np.nan] * 5),
+    ],
+)
+@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
+def test_quantile(interpolation, a_vals, b_vals, q, request):
+    if (
+        interpolation == "nearest"
+        and q == 0.5
+        and isinstance(b_vals, list)
+        and b_vals == [4, 3, 2, 1]
+    ):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason="Unclear numpy expectation for nearest "
+                "result with equidistant data"
+            )
+        )
+    all_vals = pd.concat([pd.Series(a_vals), pd.Series(b_vals)])
+
+    a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
+    b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)
+
+    df = DataFrame({"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": all_vals})
+
+    expected = DataFrame(
+        [a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key")
+    )
+    if all_vals.dtype.kind == "M" and expected.dtypes.values[0].kind == "M":
+        # TODO(non-nano): this should be unnecessary once array_to_datetime
+        # correctly infers non-nano from Timestamp.unit
+        expected = expected.astype(all_vals.dtype)
+    result = df.groupby("key").quantile(q, interpolation=interpolation)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array():
+    # https://github.com/pandas-dev/pandas/issues/27526
+    df = DataFrame({"A": [0, 1, 2, 3, 4]})
+    key = np.array([0, 0, 1, 1, 1], dtype=np.int64)
+    result = df.groupby(key).quantile([0.25])
+
+    index = pd.MultiIndex.from_product([[0, 1], [0.25]])
+    expected = DataFrame({"A": [0.25, 2.50]}, index=index)
+    tm.assert_frame_equal(result, expected)
+
+    df = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})
+    index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]])
+
+    key = np.array([0, 0, 1, 1], dtype=np.int64)
+    result = df.groupby(key).quantile([0.25, 0.75])
+    expected = DataFrame(
+        {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array2():
+    # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
+    arr = np.random.RandomState(0).randint(0, 5, size=(10, 3), dtype=np.int64)
+    df = DataFrame(arr, columns=list("ABC"))
+    result = df.groupby("A").quantile([0.3, 0.7])
+    expected = DataFrame(
+        {
+            "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0],
+            "C": [1.2, 2.8, 1.8, 3.0, 0.0, 0.0, 1.9, 3.1, 3.0, 3.0],
+        },
+        index=pd.MultiIndex.from_product(
+            [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None]
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array_no_sort():
+    df = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]})
+    key = np.array([1, 0, 1], dtype=np.int64)
+    result = df.groupby(key, sort=False).quantile([0.25, 0.5, 0.75])
+    expected = DataFrame(
+        {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]},
+        index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+    result = df.groupby(key, sort=False).quantile([0.75, 0.25])
+    expected = DataFrame(
+        {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]},
+        index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array_multiple_levels():
+    df = DataFrame(
+        {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]}
+    )
+    result = df.groupby(["c", "d"]).quantile([0.25, 0.75])
+    index = pd.MultiIndex.from_tuples(
+        [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)],
+        names=["c", "d", None],
+    )
+    expected = DataFrame(
+        {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("frame_size", [(2, 3), (100, 10)])
+@pytest.mark.parametrize("groupby", [[0], [0, 1]])
+@pytest.mark.parametrize("q", [[0.5, 0.6]])
+def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q):
+    # GH30289
+    nrow, ncol = frame_size
+    df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol))
+
+    idx_levels = [np.arange(min(nrow, 4))] * len(groupby) + [q]
+    idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [
+        list(range(len(q))) * min(nrow, 4)
+    ]
+    expected_index = pd.MultiIndex(
+        levels=idx_levels, codes=idx_codes, names=groupby + [None]
+    )
+    expected_values = [
+        [float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q
+    ]
+    expected_columns = [x for x in range(ncol) if x not in groupby]
+    expected = DataFrame(
+        expected_values, index=expected_index, columns=expected_columns
+    )
+    result = df.groupby(groupby).quantile(q)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_raises():
+    df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"])
+
+    with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"):
+        df.groupby("key").quantile()
+
+
+def test_quantile_out_of_bounds_q_raises():
+    # https://github.com/pandas-dev/pandas/issues/27470
+    df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
+    g = df.groupby([0, 0, 0, 1, 1, 1])
+    with pytest.raises(ValueError, match="Got '50.0' instead"):
+        g.quantile(50)
+
+    with pytest.raises(ValueError, match="Got '-1.0' instead"):
+        g.quantile(-1)
+
+
+def test_quantile_missing_group_values_no_segfaults():
+    # GH 28662
+    data = np.array([1.0, np.nan, 1.0])
+    df = DataFrame({"key": data, "val": range(3)})
+
+    # Random segfaults; would have been guaranteed in loop
+    grp = df.groupby("key")
+    for _ in range(100):
+        grp.quantile()
+
+
+@pytest.mark.parametrize(
+    "key, val, expected_key, expected_val",
+    [
+        ([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]),
+        ([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]),
+        (["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]),
+        ([0], [42], [0], [42.0]),
+        ([], [], np.array([], dtype="float64"), np.array([], dtype="float64")),
+    ],
+)
+def test_quantile_missing_group_values_correct_results(
+    key, val, expected_key, expected_val
+):
+    # GH 28662, GH 33200, GH 33569
+    df = DataFrame({"key": key, "val": val})
+
+    expected = DataFrame(
+        expected_val, index=Index(expected_key, name="key"), columns=["val"]
+    )
+
+    grp = df.groupby("key")
+
+    result = grp.quantile(0.5)
+    tm.assert_frame_equal(result, expected)
+
+    result = grp.quantile()
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "values",
+    [
+        pd.array([1, 0, None] * 2, dtype="Int64"),
+        pd.array([True, False, None] * 2, dtype="boolean"),
+    ],
+)
+@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
+def test_groupby_quantile_nullable_array(values, q):
+    # https://github.com/pandas-dev/pandas/issues/33136
+    df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values})
+    result = df.groupby("a")["b"].quantile(q)
+
+    if isinstance(q, list):
+        idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
+        true_quantiles = [0.0, 0.5, 1.0]
+    else:
+        idx = Index(["x", "y"], name="a")
+        true_quantiles = [0.5]
+
+    expected = pd.Series(true_quantiles * 2, index=idx, name="b", dtype="Float64")
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
+@pytest.mark.parametrize("numeric_only", [True, False])
+def test_groupby_quantile_raises_on_invalid_dtype(q, numeric_only):
+    df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
+    if numeric_only:
+        result = df.groupby("a").quantile(q, numeric_only=numeric_only)
+        expected = df.groupby("a")[["b"]].quantile(q)
+        tm.assert_frame_equal(result, expected)
+    else:
+        with pytest.raises(
+            TypeError, match="'quantile' cannot be performed against 'object' dtypes!"
+        ):
+            df.groupby("a").quantile(q, numeric_only=numeric_only)
+
+
+def test_groupby_quantile_NA_float(any_float_dtype):
+    # GH#42849
+    df = DataFrame({"x": [1, 1], "y": [0.2, np.nan]}, dtype=any_float_dtype)
+    result = df.groupby("x")["y"].quantile(0.5)
+    exp_index = Index([1.0], dtype=any_float_dtype, name="x")
+
+    if any_float_dtype in ["Float32", "Float64"]:
+        expected_dtype = any_float_dtype
+    else:
+        expected_dtype = None
+
+    expected = pd.Series([0.2], dtype=expected_dtype, index=exp_index, name="y")
+    tm.assert_series_equal(result, expected)
+
+    result = df.groupby("x")["y"].quantile([0.5, 0.75])
+    expected = pd.Series(
+        [0.2] * 2,
+        index=pd.MultiIndex.from_product((exp_index, [0.5, 0.75]), names=["x", None]),
+        name="y",
+        dtype=expected_dtype,
+    )
+    tm.assert_series_equal(result, expected)
+
+
+def test_groupby_quantile_NA_int(any_int_ea_dtype):
+    # GH#42849
+    df = DataFrame({"x": [1, 1], "y": [2, 5]}, dtype=any_int_ea_dtype)
+    result = df.groupby("x")["y"].quantile(0.5)
+    expected = pd.Series(
+        [3.5],
+        dtype="Float64",
+        index=Index([1], name="x", dtype=any_int_ea_dtype),
+        name="y",
+    )
+    tm.assert_series_equal(expected, result)
+
+    result = df.groupby("x").quantile(0.5)
+    expected = DataFrame(
+        {"y": 3.5}, dtype="Float64", index=Index([1], name="x", dtype=any_int_ea_dtype)
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "interpolation, val1, val2", [("lower", 2, 2), ("higher", 2, 3), ("nearest", 2, 2)]
+)
+def test_groupby_quantile_all_na_group_masked(
+    interpolation, val1, val2, any_numeric_ea_dtype
+):
+    # GH#37493
+    df = DataFrame(
+        {"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype
+    )
+    result = df.groupby("a").quantile(q=[0.5, 0.7], interpolation=interpolation)
+    expected = DataFrame(
+        {"b": [val1, val2, pd.NA, pd.NA]},
+        dtype=any_numeric_ea_dtype,
+        index=pd.MultiIndex.from_arrays(
+            [pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype), [0.5, 0.7, 0.5, 0.7]],
+            names=["a", None],
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("interpolation", ["midpoint", "linear"])
+def test_groupby_quantile_all_na_group_masked_interp(
+    interpolation, any_numeric_ea_dtype
+):
+    # GH#37493
+    df = DataFrame(
+        {"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype
+    )
+    result = df.groupby("a").quantile(q=[0.5, 0.75], interpolation=interpolation)
+
+    if any_numeric_ea_dtype == "Float32":
+        expected_dtype = any_numeric_ea_dtype
+    else:
+        expected_dtype = "Float64"
+
+    expected = DataFrame(
+        {"b": [2.0, 2.5, pd.NA, pd.NA]},
+        dtype=expected_dtype,
+        index=pd.MultiIndex.from_arrays(
+            [
+                pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype),
+                [0.5, 0.75, 0.5, 0.75],
+            ],
+            names=["a", None],
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", ["Float64", "Float32"])
+def test_groupby_quantile_allNA_column(dtype):
+    # GH#42849
+    df = DataFrame({"x": [1, 1], "y": [pd.NA] * 2}, dtype=dtype)
+    result = df.groupby("x")["y"].quantile(0.5)
+    expected = pd.Series(
+        [np.nan], dtype=dtype, index=Index([1.0], dtype=dtype), name="y"
+    )
+    expected.index.name = "x"
+    tm.assert_series_equal(expected, result)
+
+
+def test_groupby_timedelta_quantile():
+    # GH: 29485
+    df = DataFrame(
+        {"value": pd.to_timedelta(np.arange(4), unit="s"), "group": [1, 1, 2, 2]}
+    )
+    result = df.groupby("group").quantile(0.99)
+    expected = DataFrame(
+        {
+            "value": [
+                pd.Timedelta("0 days 00:00:00.990000"),
+                pd.Timedelta("0 days 00:00:02.990000"),
+            ]
+        },
+        index=Index([1, 2], name="group"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_columns_groupby_quantile():
+    # GH 33795
+    df = DataFrame(
+        np.arange(12).reshape(3, -1),
+        index=list("XYZ"),
+        columns=pd.Series(list("ABAB"), name="col"),
+    )
+    result = df.groupby("col", axis=1).quantile(q=[0.8, 0.2])
+    expected = DataFrame(
+        [
+            [1.6, 0.4, 2.6, 1.4],
+            [5.6, 4.4, 6.6, 5.4],
+            [9.6, 8.4, 10.6, 9.4],
+        ],
+        index=list("XYZ"),
+        columns=pd.MultiIndex.from_tuples(
+            [("A", 0.8), ("A", 0.2), ("B", 0.8), ("B", 0.2)], names=["col", None]
+        ),
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_timestamp_groupby_quantile():
+    # GH 33168
+    df = DataFrame(
+        {
+            "timestamp": pd.date_range(
+                start="2020-04-19 00:00:00", freq="1T", periods=100, tz="UTC"
+            ).floor("1H"),
+            "category": list(range(1, 101)),
+            "value": list(range(101, 201)),
+        }
+    )
+
+    result = df.groupby("timestamp").quantile([0.2, 0.8])
+
+    expected = DataFrame(
+        [
+            {"category": 12.8, "value": 112.8},
+            {"category": 48.2, "value": 148.2},
+            {"category": 68.8, "value": 168.8},
+            {"category": 92.2, "value": 192.2},
+        ],
+        index=pd.MultiIndex.from_tuples(
+            [
+                (pd.Timestamp("2020-04-19 00:00:00+00:00"), 0.2),
+                (pd.Timestamp("2020-04-19 00:00:00+00:00"), 0.8),
+                (pd.Timestamp("2020-04-19 01:00:00+00:00"), 0.2),
+                (pd.Timestamp("2020-04-19 01:00:00+00:00"), 0.8),
+            ],
+            names=("timestamp", None),
+        ),
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_quantile_dt64tz_period():
+    # GH#51373
+    dti = pd.date_range("2016-01-01", periods=1000)
+    ser = pd.Series(dti)
+    df = ser.to_frame()
+    df[1] = dti.tz_localize("US/Pacific")
+    df[2] = dti.to_period("D")
+    df[3] = dti - dti[0]
+    df.iloc[-1] = pd.NaT
+
+    by = np.tile(np.arange(5), 200)
+    gb = df.groupby(by)
+
+    result = gb.quantile(0.5)
+
+    # Check that we match the group-by-group result
+    exp = {i: df.iloc[i::5].quantile(0.5) for i in range(5)}
+    expected = DataFrame(exp).T
+    expected.index = expected.index.astype(np.int_)
+
+    tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_raises.py ADDED
@@ -0,0 +1,633 @@
1
+ # Only tests that raise an error and have no better location should go here.
2
+ # Tests for specific groupby methods should go in their respective
3
+ # test file.
4
+
5
+ import datetime
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from pandas import (
11
+ Categorical,
12
+ DataFrame,
13
+ Grouper,
14
+ Series,
15
+ )
16
+ from pandas.tests.groupby import get_groupby_method_args
17
+
18
+
19
+ @pytest.fixture(
20
+ params=[
21
+ "a",
22
+ ["a"],
23
+ ["a", "b"],
24
+ Grouper(key="a"),
25
+ lambda x: x % 2,
26
+ [0, 0, 0, 1, 2, 2, 2, 3, 3],
27
+ np.array([0, 0, 0, 1, 2, 2, 2, 3, 3]),
28
+ dict(zip(range(9), [0, 0, 0, 1, 2, 2, 2, 3, 3])),
29
+ Series([1, 1, 1, 1, 1, 2, 2, 2, 2]),
30
+ [Series([1, 1, 1, 1, 1, 2, 2, 2, 2]), Series([3, 3, 4, 4, 4, 4, 4, 3, 3])],
31
+ ]
32
+ )
33
+ def by(request):
34
+ return request.param
35
+
36
+
37
+ @pytest.fixture(params=[True, False])
38
+ def groupby_series(request):
39
+ return request.param
40
+
41
+
42
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
43
+ def test_groupby_raises_string(how, by, groupby_series, groupby_func):
44
+ df = DataFrame(
45
+ {
46
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
47
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
48
+ "c": range(9),
49
+ "d": list("xyzwtyuio"),
50
+ }
51
+ )
52
+ args = get_groupby_method_args(groupby_func, df)
53
+ gb = df.groupby(by=by)
54
+
55
+ if groupby_series:
56
+ gb = gb["d"]
57
+
58
+ if groupby_func == "corrwith":
59
+ assert not hasattr(gb, "corrwith")
60
+ return
61
+
62
+ klass, msg = {
63
+ "all": (None, ""),
64
+ "any": (None, ""),
65
+ "bfill": (None, ""),
66
+ "corrwith": (TypeError, "Could not convert"),
67
+ "count": (None, ""),
68
+ "cumcount": (None, ""),
69
+ "cummax": (
70
+ (NotImplementedError, TypeError),
71
+ "(function|cummax) is not (implemented|supported) for (this|object) dtype",
72
+ ),
73
+ "cummin": (
74
+ (NotImplementedError, TypeError),
75
+ "(function|cummin) is not (implemented|supported) for (this|object) dtype",
76
+ ),
77
+ "cumprod": (
78
+ (NotImplementedError, TypeError),
79
+ "(function|cumprod) is not (implemented|supported) for (this|object) dtype",
80
+ ),
81
+ "cumsum": (
82
+ (NotImplementedError, TypeError),
83
+ "(function|cumsum) is not (implemented|supported) for (this|object) dtype",
84
+ ),
85
+ "diff": (TypeError, "unsupported operand type"),
86
+ "ffill": (None, ""),
87
+ "fillna": (None, ""),
88
+ "first": (None, ""),
89
+ "idxmax": (TypeError, "'argmax' not allowed for this dtype"),
90
+ "idxmin": (TypeError, "'argmin' not allowed for this dtype"),
91
+ "last": (None, ""),
92
+ "max": (None, ""),
93
+ "mean": (TypeError, "Could not convert xy?z?w?t?y?u?i?o? to numeric"),
94
+ "median": (TypeError, "could not convert string to float"),
95
+ "min": (None, ""),
96
+ "ngroup": (None, ""),
97
+ "nunique": (None, ""),
98
+ "pct_change": (TypeError, "unsupported operand type"),
99
+ "prod": (TypeError, "can't multiply sequence by non-int of type 'str'"),
100
+ "quantile": (TypeError, "cannot be performed against 'object' dtypes!"),
101
+ "rank": (None, ""),
102
+ "sem": (ValueError, "could not convert string to float"),
103
+ "shift": (None, ""),
104
+ "size": (None, ""),
105
+ "skew": (TypeError, "could not convert string to float"),
106
+ "std": (ValueError, "could not convert string to float"),
107
+ "sum": (None, ""),
108
+ "var": (TypeError, "could not convert string to float"),
109
+ }[groupby_func]
110
+
111
+ if klass is None:
112
+ if how == "method":
113
+ getattr(gb, groupby_func)(*args)
114
+ elif how == "agg":
115
+ gb.agg(groupby_func, *args)
116
+ else:
117
+ gb.transform(groupby_func, *args)
118
+ else:
119
+ with pytest.raises(klass, match=msg):
120
+ if how == "method":
121
+ getattr(gb, groupby_func)(*args)
122
+ elif how == "agg":
123
+ gb.agg(groupby_func, *args)
124
+ else:
125
+ gb.transform(groupby_func, *args)
126
+
127
+
128
+ @pytest.mark.parametrize("how", ["agg", "transform"])
129
+ def test_groupby_raises_string_udf(how, by, groupby_series):
130
+ df = DataFrame(
131
+ {
132
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
133
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
134
+ "c": range(9),
135
+ "d": list("xyzwtyuio"),
136
+ }
137
+ )
138
+ gb = df.groupby(by=by)
139
+
140
+ if groupby_series:
141
+ gb = gb["d"]
142
+
143
+ def func(x):
144
+ raise TypeError("Test error message")
145
+
146
+ with pytest.raises(TypeError, match="Test error message"):
147
+ getattr(gb, how)(func)
148
+
149
+
150
+ @pytest.mark.parametrize("how", ["agg", "transform"])
151
+ @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
152
+ def test_groupby_raises_string_np(how, by, groupby_series, groupby_func_np):
153
+ # GH#50749
154
+ df = DataFrame(
155
+ {
156
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
157
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
158
+ "c": range(9),
159
+ "d": list("xyzwtyuio"),
160
+ }
161
+ )
162
+ gb = df.groupby(by=by)
163
+
164
+ if groupby_series:
165
+ gb = gb["d"]
166
+
167
+ klass, msg = {
168
+ np.sum: (None, ""),
169
+ np.mean: (TypeError, "Could not convert xy?z?w?t?y?u?i?o? to numeric"),
170
+ }[groupby_func_np]
171
+
172
+ if klass is None:
173
+ getattr(gb, how)(groupby_func_np)
174
+ else:
175
+ with pytest.raises(klass, match=msg):
176
+ getattr(gb, how)(groupby_func_np)
177
+
178
+
179
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
180
+ def test_groupby_raises_datetime(how, by, groupby_series, groupby_func):
181
+ df = DataFrame(
182
+ {
183
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
184
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
185
+ "c": range(9),
186
+ "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
187
+ }
188
+ )
189
+ args = get_groupby_method_args(groupby_func, df)
190
+ gb = df.groupby(by=by)
191
+
192
+ if groupby_series:
193
+ gb = gb["d"]
194
+
195
+ if groupby_func == "corrwith":
196
+ assert not hasattr(gb, "corrwith")
197
+ return
198
+
199
+ klass, msg = {
200
+ "all": (None, ""),
201
+ "any": (None, ""),
202
+ "bfill": (None, ""),
203
+ "corrwith": (TypeError, "cannot perform __mul__ with this index type"),
204
+ "count": (None, ""),
205
+ "cumcount": (None, ""),
206
+ "cummax": (None, ""),
207
+ "cummin": (None, ""),
208
+ "cumprod": (TypeError, "datetime64 type does not support cumprod operations"),
209
+ "cumsum": (TypeError, "datetime64 type does not support cumsum operations"),
210
+ "diff": (None, ""),
211
+ "ffill": (None, ""),
212
+ "fillna": (None, ""),
213
+ "first": (None, ""),
214
+ "idxmax": (None, ""),
215
+ "idxmin": (None, ""),
216
+ "last": (None, ""),
217
+ "max": (None, ""),
218
+ "mean": (None, ""),
219
+ "median": (None, ""),
220
+ "min": (None, ""),
221
+ "ngroup": (None, ""),
222
+ "nunique": (None, ""),
223
+ "pct_change": (TypeError, "cannot perform __truediv__ with this index type"),
224
+ "prod": (TypeError, "datetime64 type does not support prod"),
225
+ "quantile": (None, ""),
226
+ "rank": (None, ""),
227
+ "sem": (None, ""),
228
+ "shift": (None, ""),
229
+ "size": (None, ""),
230
+ "skew": (TypeError, r"dtype datetime64\[ns\] does not support reduction"),
231
+ "std": (None, ""),
232
+ "sum": (TypeError, "datetime64 type does not support sum operations"),
233
+ "var": (None, ""),
234
+ }[groupby_func]
235
+
236
+ if klass is None:
237
+ if how == "method":
238
+ getattr(gb, groupby_func)(*args)
239
+ elif how == "agg":
240
+ gb.agg(groupby_func, *args)
241
+ else:
242
+ gb.transform(groupby_func, *args)
243
+ else:
244
+ with pytest.raises(klass, match=msg):
245
+ if how == "method":
246
+ getattr(gb, groupby_func)(*args)
247
+ elif how == "agg":
248
+ gb.agg(groupby_func, *args)
249
+ else:
250
+ gb.transform(groupby_func, *args)
251
+
252
+
253
+ @pytest.mark.parametrize("how", ["agg", "transform"])
254
+ def test_groupby_raises_datetime_udf(how, by, groupby_series):
255
+ df = DataFrame(
256
+ {
257
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
258
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
259
+ "c": range(9),
260
+ "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
261
+ }
262
+ )
263
+
264
+ gb = df.groupby(by=by)
265
+
266
+ if groupby_series:
267
+ gb = gb["d"]
268
+
269
+ def func(x):
270
+ raise TypeError("Test error message")
271
+
272
+ with pytest.raises(TypeError, match="Test error message"):
273
+ getattr(gb, how)(func)
274
+
275
+
276
+ @pytest.mark.parametrize("how", ["agg", "transform"])
277
+ @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
278
+ def test_groupby_raises_datetime_np(how, by, groupby_series, groupby_func_np):
279
+ # GH#50749
280
+ df = DataFrame(
281
+ {
282
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
283
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
284
+ "c": range(9),
285
+ "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
286
+ }
287
+ )
288
+ gb = df.groupby(by=by)
289
+
290
+ if groupby_series:
291
+ gb = gb["d"]
292
+
293
+ klass, msg = {
294
+ np.sum: (TypeError, "datetime64 type does not support sum operations"),
295
+ np.mean: (None, ""),
296
+ }[groupby_func_np]
297
+
298
+ if klass is None:
299
+ getattr(gb, how)(groupby_func_np)
300
+ else:
301
+ with pytest.raises(klass, match=msg):
302
+ getattr(gb, how)(groupby_func_np)
303
+
304
+
305
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
306
+ def test_groupby_raises_category(
307
+ how, by, groupby_series, groupby_func, using_copy_on_write
308
+ ):
309
+ # GH#50749
310
+ df = DataFrame(
311
+ {
312
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
313
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
314
+ "c": range(9),
315
+ "d": Categorical(
316
+ ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
317
+ categories=["a", "b", "c", "d"],
318
+ ordered=True,
319
+ ),
320
+ }
321
+ )
322
+ args = get_groupby_method_args(groupby_func, df)
323
+ gb = df.groupby(by=by)
324
+
325
+ if groupby_series:
326
+ gb = gb["d"]
327
+
328
+ if groupby_func == "corrwith":
329
+ assert not hasattr(gb, "corrwith")
330
+ return
331
+
332
+ klass, msg = {
333
+ "all": (None, ""),
334
+ "any": (None, ""),
335
+ "bfill": (None, ""),
336
+ "corrwith": (
337
+ TypeError,
338
+ r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
339
+ ),
340
+ "count": (None, ""),
341
+ "cumcount": (None, ""),
342
+ "cummax": (
343
+ (NotImplementedError, TypeError),
344
+ "(category type does not support cummax operations|"
345
+ + "category dtype not supported|"
346
+ + "cummax is not supported for category dtype)",
347
+ ),
348
+ "cummin": (
349
+ (NotImplementedError, TypeError),
350
+ "(category type does not support cummin operations|"
351
+ + "category dtype not supported|"
352
+ "cummin is not supported for category dtype)",
353
+ ),
354
+ "cumprod": (
355
+ (NotImplementedError, TypeError),
356
+ "(category type does not support cumprod operations|"
357
+ + "category dtype not supported|"
358
+ "cumprod is not supported for category dtype)",
359
+ ),
360
+ "cumsum": (
361
+ (NotImplementedError, TypeError),
362
+ "(category type does not support cumsum operations|"
363
+ + "category dtype not supported|"
364
+ "cumsum is not supported for category dtype)",
365
+ ),
366
+ "diff": (
367
+ TypeError,
368
+ r"unsupported operand type\(s\) for -: 'Categorical' and 'Categorical'",
369
+ ),
370
+ "ffill": (None, ""),
371
+ "fillna": (
372
+ TypeError,
373
+ r"Cannot setitem on a Categorical with a new category \(0\), "
374
+ + "set the categories first",
375
+ )
376
+ if not using_copy_on_write
377
+ else (None, ""), # no-op with CoW
378
+ "first": (None, ""),
379
+ "idxmax": (None, ""),
380
+ "idxmin": (None, ""),
381
+ "last": (None, ""),
382
+ "max": (None, ""),
383
+ "mean": (
384
+ TypeError,
385
+ "'Categorical' with dtype category does not support reduction 'mean'",
386
+ ),
387
+ "median": (
388
+ TypeError,
389
+ "'Categorical' with dtype category does not support reduction 'median'",
390
+ ),
391
+ "min": (None, ""),
392
+ "ngroup": (None, ""),
393
+ "nunique": (None, ""),
394
+ "pct_change": (
395
+ TypeError,
396
+ r"unsupported operand type\(s\) for /: 'Categorical' and 'Categorical'",
397
+ ),
398
+ "prod": (TypeError, "category type does not support prod operations"),
399
+ "quantile": (TypeError, "No matching signature found"),
400
+ "rank": (None, ""),
401
+ "sem": (ValueError, "Cannot cast object dtype to float64"),
402
+ "shift": (None, ""),
403
+ "size": (None, ""),
404
+ "skew": (
405
+ TypeError,
406
+ "'Categorical' with dtype category does not support reduction 'skew'",
407
+ ),
408
+ "std": (ValueError, "Cannot cast object dtype to float64"),
409
+ "sum": (TypeError, "category type does not support sum operations"),
410
+ "var": (
411
+ TypeError,
412
+ "'Categorical' with dtype category does not support reduction 'var'",
413
+ ),
414
+ }[groupby_func]
415
+
416
+ if klass is None:
417
+ if how == "method":
418
+ getattr(gb, groupby_func)(*args)
419
+ elif how == "agg":
420
+ gb.agg(groupby_func, *args)
421
+ else:
422
+ gb.transform(groupby_func, *args)
423
+ else:
424
+ with pytest.raises(klass, match=msg):
425
+ if how == "method":
426
+ getattr(gb, groupby_func)(*args)
427
+ elif how == "agg":
428
+ gb.agg(groupby_func, *args)
429
+ else:
430
+ gb.transform(groupby_func, *args)
431
+
432
+
433
+ @pytest.mark.parametrize("how", ["agg", "transform"])
434
+ def test_groupby_raises_category_udf(how, by, groupby_series):
435
+ # GH#50749
436
+ df = DataFrame(
437
+ {
438
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
439
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
440
+ "c": range(9),
441
+ "d": Categorical(
442
+ ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
443
+ categories=["a", "b", "c", "d"],
444
+ ordered=True,
445
+ ),
446
+ }
447
+ )
448
+ gb = df.groupby(by=by)
449
+
450
+ if groupby_series:
451
+ gb = gb["d"]
452
+
453
+ def func(x):
454
+ raise TypeError("Test error message")
455
+
456
+ with pytest.raises(TypeError, match="Test error message"):
457
+ getattr(gb, how)(func)
458
+
459
+
460
+ @pytest.mark.parametrize("how", ["agg", "transform"])
461
+ @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
462
+ def test_groupby_raises_category_np(how, by, groupby_series, groupby_func_np):
463
+ # GH#50749
464
+ df = DataFrame(
465
+ {
466
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
467
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
468
+ "c": range(9),
469
+ "d": Categorical(
470
+ ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
471
+ categories=["a", "b", "c", "d"],
472
+ ordered=True,
473
+ ),
474
+ }
475
+ )
476
+ gb = df.groupby(by=by)
477
+
478
+ if groupby_series:
479
+ gb = gb["d"]
480
+
481
+ klass, msg = {
482
+ np.sum: (TypeError, "category type does not support sum operations"),
483
+ np.mean: (
484
+ TypeError,
485
+ "'Categorical' with dtype category does not support reduction 'mean'",
486
+ ),
487
+ }[groupby_func_np]
488
+
489
+ if klass is None:
490
+ getattr(gb, how)(groupby_func_np)
491
+ else:
492
+ with pytest.raises(klass, match=msg):
493
+ getattr(gb, how)(groupby_func_np)
494
+
495
+
496
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
497
+ def test_groupby_raises_category_on_category(
498
+ how, by, groupby_series, groupby_func, observed, using_copy_on_write
499
+ ):
500
+ # GH#50749
501
+ df = DataFrame(
502
+ {
503
+ "a": Categorical(
504
+ ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
505
+ categories=["a", "b", "c", "d"],
506
+ ordered=True,
507
+ ),
508
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
509
+ "c": range(9),
510
+ "d": Categorical(
511
+ ["a", "a", "a", "a", "b", "b", "c", "c", "c"],
512
+ categories=["a", "b", "c", "d"],
513
+ ordered=True,
514
+ ),
515
+ }
516
+ )
517
+ args = get_groupby_method_args(groupby_func, df)
518
+ gb = df.groupby(by=by, observed=observed)
519
+
520
+ if groupby_series:
521
+ gb = gb["d"]
522
+
523
+ if groupby_func == "corrwith":
524
+ assert not hasattr(gb, "corrwith")
525
+ return
526
+
527
+ empty_groups = any(group.empty for group in gb.groups.values())
528
+
529
+ klass, msg = {
530
+ "all": (None, ""),
531
+ "any": (None, ""),
532
+ "bfill": (None, ""),
533
+ "corrwith": (
534
+ TypeError,
535
+ r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
536
+ ),
537
+ "count": (None, ""),
538
+ "cumcount": (None, ""),
539
+ "cummax": (
540
+ (NotImplementedError, TypeError),
541
+ "(cummax is not supported for category dtype|"
542
+ + "category dtype not supported|"
543
+ + "category type does not support cummax operations)",
544
+ ),
545
+ "cummin": (
546
+ (NotImplementedError, TypeError),
547
+ "(cummin is not supported for category dtype|"
548
+ + "category dtype not supported|"
549
+ "category type does not support cummin operations)",
550
+ ),
551
+ "cumprod": (
552
+ (NotImplementedError, TypeError),
553
+ "(cumprod is not supported for category dtype|"
554
+ + "category dtype not supported|"
555
+ "category type does not support cumprod operations)",
556
+ ),
557
+ "cumsum": (
558
+ (NotImplementedError, TypeError),
559
+ "(cumsum is not supported for category dtype|"
560
+ + "category dtype not supported|"
561
+ + "category type does not support cumsum operations)",
562
+ ),
563
+ "diff": (TypeError, "unsupported operand type"),
564
+ "ffill": (None, ""),
565
+ "fillna": (
566
+ TypeError,
567
+ r"Cannot setitem on a Categorical with a new category \(0\), "
568
+ + "set the categories first",
569
+ )
570
+ if not using_copy_on_write
571
+ else (None, ""), # no-op with CoW
572
+ "first": (None, ""),
573
+ "idxmax": (ValueError, "attempt to get argmax of an empty sequence")
574
+ if empty_groups
575
+ else (None, ""),
576
+ "idxmin": (ValueError, "attempt to get argmin of an empty sequence")
577
+ if empty_groups
578
+ else (None, ""),
579
+ "last": (None, ""),
580
+ "max": (None, ""),
581
+ "mean": (
582
+ TypeError,
583
+ "'Categorical' with dtype category does not support reduction 'mean'",
584
+ ),
585
+ "median": (
586
+ TypeError,
587
+ "'Categorical' with dtype category does not support reduction 'median'",
588
+ ),
589
+ "min": (None, ""),
590
+ "ngroup": (None, ""),
591
+ "nunique": (None, ""),
592
+ "pct_change": (TypeError, "unsupported operand type"),
593
+ "prod": (TypeError, "category type does not support prod operations"),
594
+ "quantile": (TypeError, ""),
595
+ "rank": (None, ""),
596
+ "sem": (ValueError, "Cannot cast object dtype to float64"),
597
+ "shift": (None, ""),
598
+ "size": (None, ""),
599
+ "skew": (
600
+ TypeError,
601
+ "'Categorical' with dtype category does not support reduction 'skew'",
602
+ ),
603
+ "std": (ValueError, "Cannot cast object dtype to float64"),
604
+ "sum": (TypeError, "category type does not support sum operations"),
605
+ "var": (
606
+ TypeError,
607
+ "'Categorical' with dtype category does not support reduction 'var'",
608
+ ),
609
+ }[groupby_func]
610
+
611
+ if klass is None:
612
+ if how == "method":
613
+ getattr(gb, groupby_func)(*args)
614
+ elif how == "agg":
615
+ gb.agg(groupby_func, *args)
616
+ else:
617
+ gb.transform(groupby_func, *args)
618
+ else:
619
+ with pytest.raises(klass, match=msg):
620
+ if how == "method":
621
+ getattr(gb, groupby_func)(*args)
622
+ elif how == "agg":
623
+ gb.agg(groupby_func, *args)
624
+ else:
625
+ gb.transform(groupby_func, *args)
626
+
627
+
628
+ def test_subsetting_columns_axis_1_raises():
629
+ # GH 35443
630
+ df = DataFrame({"a": [1], "b": [2], "c": [3]})
631
+ gb = df.groupby("a", axis=1)
632
+ with pytest.raises(ValueError, match="Cannot subset columns when using axis=1"):
633
+ gb["b"]
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_rank.py ADDED
@@ -0,0 +1,698 @@
1
+ from datetime import datetime
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ NaT,
10
+ Series,
11
+ concat,
12
+ )
13
+ import pandas._testing as tm
14
+
15
+
16
+ def test_rank_unordered_categorical_typeerror():
17
+ # GH#51034 should be TypeError, not NotImplementedError
18
+ cat = pd.Categorical([], ordered=False)
19
+ ser = Series(cat)
20
+ df = ser.to_frame()
21
+
22
+ msg = "Cannot perform rank with non-ordered Categorical"
23
+
24
+ gb = ser.groupby(cat)
25
+ with pytest.raises(TypeError, match=msg):
26
+ gb.rank()
27
+
28
+ gb2 = df.groupby(cat)
29
+ with pytest.raises(TypeError, match=msg):
30
+ gb2.rank()
31
+
32
+
33
+ def test_rank_apply():
34
+ lev1 = tm.rands_array(10, 100)
35
+ lev2 = tm.rands_array(10, 130)
36
+ lab1 = np.random.randint(0, 100, size=500)
37
+ lab2 = np.random.randint(0, 130, size=500)
38
+
39
+ df = DataFrame(
40
+ {
41
+ "value": np.random.randn(500),
42
+ "key1": lev1.take(lab1),
43
+ "key2": lev2.take(lab2),
44
+ }
45
+ )
46
+
47
+ result = df.groupby(["key1", "key2"]).value.rank()
48
+
49
+ expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
50
+ expected = concat(expected, axis=0)
51
+ expected = expected.reindex(result.index)
52
+ tm.assert_series_equal(result, expected)
53
+
54
+ result = df.groupby(["key1", "key2"]).value.rank(pct=True)
55
+
56
+ expected = [
57
+ piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
58
+ ]
59
+ expected = concat(expected, axis=0)
60
+ expected = expected.reindex(result.index)
61
+ tm.assert_series_equal(result, expected)
62
+
63
+
64
+ @pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
65
+ @pytest.mark.parametrize(
66
+ "vals",
67
+ [
68
+ np.array([2, 2, 8, 2, 6], dtype=dtype)
69
+ for dtype in ["i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "f8", "f4", "f2"]
70
+ ]
71
+ + [
72
+ [
73
+ pd.Timestamp("2018-01-02"),
74
+ pd.Timestamp("2018-01-02"),
75
+ pd.Timestamp("2018-01-08"),
76
+ pd.Timestamp("2018-01-02"),
77
+ pd.Timestamp("2018-01-06"),
78
+ ],
79
+ [
80
+ pd.Timestamp("2018-01-02", tz="US/Pacific"),
81
+ pd.Timestamp("2018-01-02", tz="US/Pacific"),
82
+ pd.Timestamp("2018-01-08", tz="US/Pacific"),
83
+ pd.Timestamp("2018-01-02", tz="US/Pacific"),
84
+ pd.Timestamp("2018-01-06", tz="US/Pacific"),
85
+ ],
86
+ [
87
+ pd.Timestamp("2018-01-02") - pd.Timestamp(0),
88
+ pd.Timestamp("2018-01-02") - pd.Timestamp(0),
89
+ pd.Timestamp("2018-01-08") - pd.Timestamp(0),
90
+ pd.Timestamp("2018-01-02") - pd.Timestamp(0),
91
+ pd.Timestamp("2018-01-06") - pd.Timestamp(0),
92
+ ],
93
+ [
94
+ pd.Timestamp("2018-01-02").to_period("D"),
95
+ pd.Timestamp("2018-01-02").to_period("D"),
96
+ pd.Timestamp("2018-01-08").to_period("D"),
97
+ pd.Timestamp("2018-01-02").to_period("D"),
98
+ pd.Timestamp("2018-01-06").to_period("D"),
99
+ ],
100
+ ],
101
+ ids=lambda x: type(x[0]),
102
+ )
103
+ @pytest.mark.parametrize(
104
+ "ties_method,ascending,pct,exp",
105
+ [
106
+ ("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
107
+ ("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
108
+ ("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
109
+ ("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
110
+ ("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
111
+ ("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
112
+ ("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
113
+ ("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
114
+ ("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
115
+ ("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
116
+ ("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
117
+ ("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
118
+ ("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
119
+ ("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
120
+ ("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
121
+ ("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
122
+ ("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
123
+ ("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
124
+ ("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
125
+ ("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
126
+ ],
127
+ )
128
+ def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
129
+ key = np.repeat(grps, len(vals))
130
+
131
+ orig_vals = vals
132
+ vals = list(vals) * len(grps)
133
+ if isinstance(orig_vals, np.ndarray):
134
+ vals = np.array(vals, dtype=orig_vals.dtype)
135
+
136
+ df = DataFrame({"key": key, "val": vals})
137
+ result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
138
+
139
+ exp_df = DataFrame(exp * len(grps), columns=["val"])
140
+ tm.assert_frame_equal(result, exp_df)
141
+
142
+
143
+ @pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
144
+ @pytest.mark.parametrize(
145
+ "vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
146
+ )
147
+ @pytest.mark.parametrize(
148
+ "ties_method,ascending,na_option,exp",
149
+ [
150
+ ("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
151
+ ("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
152
+ ("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
153
+ ("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
154
+ ("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
155
+ ("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
156
+ ("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
157
+ ("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
158
+ ("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
159
+ ("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
160
+ ("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
161
+ ("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
162
+ ("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
163
+ ("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
164
+ ("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
165
+ ("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
166
+ ("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
167
+ ("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
168
+ ("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
169
+ ("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
170
+ ("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
171
+ ("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
172
+ ("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
173
+ ("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
174
+ ("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
175
+ ("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
176
+ ("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
177
+ ("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
178
+ ("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
179
+ ("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
180
+ ],
181
+ )
182
+ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
183
+ # GH 20561
184
+ key = np.repeat(grps, len(vals))
185
+ vals = vals * len(grps)
186
+ df = DataFrame({"key": key, "val": vals})
187
+ result = df.groupby("key").rank(
188
+ method=ties_method, ascending=ascending, na_option=na_option
189
+ )
190
+ exp_df = DataFrame(exp * len(grps), columns=["val"])
191
+ tm.assert_frame_equal(result, exp_df)
192
+
193
+
194
+ @pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
195
+ @pytest.mark.parametrize(
196
+ "vals",
197
+ [
198
+ np.array([2, 2, np.nan, 8, 2, 6, np.nan, np.nan], dtype=dtype)
199
+ for dtype in ["f8", "f4", "f2"]
200
+ ]
201
+ + [
202
+ [
203
+ pd.Timestamp("2018-01-02"),
204
+ pd.Timestamp("2018-01-02"),
205
+ np.nan,
206
+ pd.Timestamp("2018-01-08"),
207
+ pd.Timestamp("2018-01-02"),
208
+ pd.Timestamp("2018-01-06"),
209
+ np.nan,
210
+ np.nan,
211
+ ],
212
+ [
213
+ pd.Timestamp("2018-01-02", tz="US/Pacific"),
214
+ pd.Timestamp("2018-01-02", tz="US/Pacific"),
215
+ np.nan,
216
+ pd.Timestamp("2018-01-08", tz="US/Pacific"),
217
+ pd.Timestamp("2018-01-02", tz="US/Pacific"),
218
+ pd.Timestamp("2018-01-06", tz="US/Pacific"),
219
+ np.nan,
220
+ np.nan,
221
+ ],
222
+ [
223
+ pd.Timestamp("2018-01-02") - pd.Timestamp(0),
224
+ pd.Timestamp("2018-01-02") - pd.Timestamp(0),
225
+ np.nan,
226
+ pd.Timestamp("2018-01-08") - pd.Timestamp(0),
227
+ pd.Timestamp("2018-01-02") - pd.Timestamp(0),
228
+ pd.Timestamp("2018-01-06") - pd.Timestamp(0),
229
+ np.nan,
230
+ np.nan,
231
+ ],
232
+ [
233
+ pd.Timestamp("2018-01-02").to_period("D"),
234
+ pd.Timestamp("2018-01-02").to_period("D"),
235
+ np.nan,
236
+ pd.Timestamp("2018-01-08").to_period("D"),
237
+ pd.Timestamp("2018-01-02").to_period("D"),
238
+ pd.Timestamp("2018-01-06").to_period("D"),
239
+ np.nan,
240
+ np.nan,
241
+ ],
242
+ ],
243
+ ids=lambda x: type(x[0]),
244
+ )
245
+ @pytest.mark.parametrize(
246
+ "ties_method,ascending,na_option,pct,exp",
247
+ [
248
+ (
249
+ "average",
250
+ True,
251
+ "keep",
252
+ False,
253
+ [2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
254
+ ),
255
+ (
256
+ "average",
257
+ True,
258
+ "keep",
259
+ True,
260
+ [0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
261
+ ),
262
+ (
263
+ "average",
264
+ False,
265
+ "keep",
266
+ False,
267
+ [4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
268
+ ),
269
+ (
270
+ "average",
271
+ False,
272
+ "keep",
273
+ True,
274
+ [0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
275
+ ),
276
+ ("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
277
+ ("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
278
+ (
279
+ "min",
280
+ False,
281
+ "keep",
282
+ False,
283
+ [3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
284
+ ),
285
+ ("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
286
+ ("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
287
+ ("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
288
+ (
289
+ "max",
290
+ False,
291
+ "keep",
292
+ False,
293
+ [5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
294
+ ),
295
+ ("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
296
+ (
297
+ "first",
298
+ True,
299
+ "keep",
300
+ False,
301
+ [1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
302
+ ),
303
+ (
304
+ "first",
305
+ True,
306
+ "keep",
307
+ True,
308
+ [0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
309
+ ),
310
+ (
311
+ "first",
312
+ False,
313
+ "keep",
314
+ False,
315
+ [3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
316
+ ),
317
+ (
318
+ "first",
319
+ False,
320
+ "keep",
321
+ True,
322
+ [0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
323
+ ),
324
+ (
325
+ "dense",
326
+ True,
327
+ "keep",
328
+ False,
329
+ [1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
330
+ ),
331
+ (
332
+ "dense",
333
+ True,
334
+ "keep",
335
+ True,
336
+ [
337
+ 1.0 / 3.0,
338
+ 1.0 / 3.0,
339
+ np.nan,
340
+ 3.0 / 3.0,
341
+ 1.0 / 3.0,
342
+ 2.0 / 3.0,
343
+ np.nan,
344
+ np.nan,
345
+ ],
346
+ ),
347
+ (
348
+ "dense",
349
+ False,
350
+ "keep",
351
+ False,
352
+ [3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
353
+ ),
354
+ (
355
+ "dense",
356
+ False,
357
+ "keep",
358
+ True,
359
+ [
360
+ 3.0 / 3.0,
361
+ 3.0 / 3.0,
362
+ np.nan,
363
+ 1.0 / 3.0,
364
+ 3.0 / 3.0,
365
+ 2.0 / 3.0,
366
+ np.nan,
367
+ np.nan,
368
+ ],
369
+ ),
370
+ ("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
371
+ (
372
+ "average",
373
+ True,
374
+ "bottom",
375
+ True,
376
+ [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
377
+ ),
378
+ ("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
379
+ (
380
+ "average",
381
+ False,
382
+ "bottom",
383
+ True,
384
+ [0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
385
+ ),
386
+ ("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
387
+ (
388
+ "min",
389
+ True,
390
+ "bottom",
391
+ True,
392
+ [0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
393
+ ),
394
+ ("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
395
+ (
396
+ "min",
397
+ False,
398
+ "bottom",
399
+ True,
400
+ [0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
401
+ ),
402
+ ("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
403
+ ("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
404
+ ("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
405
+ (
406
+ "max",
407
+ False,
408
+ "bottom",
409
+ True,
410
+ [0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
411
+ ),
412
+ ("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
413
+ (
414
+ "first",
415
+ True,
416
+ "bottom",
417
+ True,
418
+ [0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
419
+ ),
420
+ ("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
421
+ (
422
+ "first",
423
+ False,
424
+ "bottom",
425
+ True,
426
+ [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
427
+ ),
428
+ ("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
429
+ ("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
430
+ ("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
431
+ ("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
432
+ ],
433
+ )
434
+ def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
435
+ key = np.repeat(grps, len(vals))
436
+
437
+ orig_vals = vals
438
+ vals = list(vals) * len(grps)
439
+ if isinstance(orig_vals, np.ndarray):
440
+ vals = np.array(vals, dtype=orig_vals.dtype)
441
+
442
+ df = DataFrame({"key": key, "val": vals})
443
+ result = df.groupby("key").rank(
444
+ method=ties_method, ascending=ascending, na_option=na_option, pct=pct
445
+ )
446
+
447
+ exp_df = DataFrame(exp * len(grps), columns=["val"])
448
+ tm.assert_frame_equal(result, exp_df)
449
+
450
+
451
+ @pytest.mark.parametrize(
452
+ "pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])]
453
+ )
454
+ def test_rank_resets_each_group(pct, exp):
455
+ df = DataFrame(
456
+ {"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10}
457
+ )
458
+ result = df.groupby("key").rank(pct=pct)
459
+ exp_df = DataFrame(exp * 2, columns=["val"])
460
+ tm.assert_frame_equal(result, exp_df)
461
+
462
+
463
+ @pytest.mark.parametrize(
464
+ "dtype", ["int64", "int32", "uint64", "uint32", "float64", "float32"]
465
+ )
466
+ @pytest.mark.parametrize("upper", [True, False])
467
+ def test_rank_avg_even_vals(dtype, upper):
468
+ if upper:
469
+ # use IntegerDtype/FloatingDtype
470
+ dtype = dtype[0].upper() + dtype[1:]
471
+ dtype = dtype.replace("Ui", "UI")
472
+ df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
473
+ df["val"] = df["val"].astype(dtype)
474
+ assert df["val"].dtype == dtype
475
+
476
+ result = df.groupby("key").rank()
477
+ exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
478
+ if upper:
479
+ exp_df = exp_df.astype("Float64")
480
+ tm.assert_frame_equal(result, exp_df)
481
+
482
+
483
+ @pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
484
+ @pytest.mark.parametrize("ascending", [True, False])
485
+ @pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
486
+ @pytest.mark.parametrize("pct", [True, False])
487
+ @pytest.mark.parametrize(
488
+ "vals", [["bar", "bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]]
489
+ )
490
+ def test_rank_object_dtype(ties_method, ascending, na_option, pct, vals):
491
+ df = DataFrame({"key": ["foo"] * 5, "val": vals})
492
+ mask = df["val"].isna()
493
+
494
+ gb = df.groupby("key")
495
+ res = gb.rank(method=ties_method, ascending=ascending, na_option=na_option, pct=pct)
496
+
497
+ # construct our expected by using numeric values with the same ordering
498
+ if mask.any():
499
+ df2 = DataFrame({"key": ["foo"] * 5, "val": [0, np.nan, 2, np.nan, 1]})
500
+ else:
501
+ df2 = DataFrame({"key": ["foo"] * 5, "val": [0, 0, 2, 0, 1]})
502
+
503
+ gb2 = df2.groupby("key")
504
+ alt = gb2.rank(
505
+ method=ties_method, ascending=ascending, na_option=na_option, pct=pct
506
+ )
507
+
508
+ tm.assert_frame_equal(res, alt)
509
+
510
+
511
+ @pytest.mark.parametrize("na_option", [True, "bad", 1])
512
+ @pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
513
+ @pytest.mark.parametrize("ascending", [True, False])
514
+ @pytest.mark.parametrize("pct", [True, False])
515
+ @pytest.mark.parametrize(
516
+ "vals",
517
+ [
518
+ ["bar", "bar", "foo", "bar", "baz"],
519
+ ["bar", np.nan, "foo", np.nan, "baz"],
520
+ [1, np.nan, 2, np.nan, 3],
521
+ ],
522
+ )
523
+ def test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals):
524
+ df = DataFrame({"key": ["foo"] * 5, "val": vals})
525
+ msg = "na_option must be one of 'keep', 'top', or 'bottom'"
526
+
527
+ with pytest.raises(ValueError, match=msg):
528
+ df.groupby("key").rank(
529
+ method=ties_method, ascending=ascending, na_option=na_option, pct=pct
530
+ )
531
+
532
+
533
+ def test_rank_empty_group():
534
+ # see gh-22519
535
+ column = "A"
536
+ df = DataFrame({"A": [0, 1, 0], "B": [1.0, np.nan, 2.0]})
537
+
538
+ result = df.groupby(column).B.rank(pct=True)
539
+ expected = Series([0.5, np.nan, 1.0], name="B")
540
+ tm.assert_series_equal(result, expected)
541
+
542
+ result = df.groupby(column).rank(pct=True)
543
+ expected = DataFrame({"B": [0.5, np.nan, 1.0]})
544
+ tm.assert_frame_equal(result, expected)
545
+
546
+
547
+ @pytest.mark.parametrize(
548
+ "input_key,input_value,output_value",
549
+ [
550
+ ([1, 2], [1, 1], [1.0, 1.0]),
551
+ ([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]),
552
+ ([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]),
553
+ ([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan]),
554
+ ],
555
+ )
556
+ def test_rank_zero_div(input_key, input_value, output_value):
557
+ # GH 23666
558
+ df = DataFrame({"A": input_key, "B": input_value})
559
+
560
+ result = df.groupby("A").rank(method="dense", pct=True)
561
+ expected = DataFrame({"B": output_value})
562
+ tm.assert_frame_equal(result, expected)
563
+
564
+
565
+ def test_rank_min_int():
566
+ # GH-32859
567
+ df = DataFrame(
568
+ {
569
+ "grp": [1, 1, 2],
570
+ "int_col": [
571
+ np.iinfo(np.int64).min,
572
+ np.iinfo(np.int64).max,
573
+ np.iinfo(np.int64).min,
574
+ ],
575
+ "datetimelike": [NaT, datetime(2001, 1, 1), NaT],
576
+ }
577
+ )
578
+
579
+ result = df.groupby("grp").rank()
580
+ expected = DataFrame(
581
+ {"int_col": [1.0, 2.0, 1.0], "datetimelike": [np.NaN, 1.0, np.NaN]}
582
+ )
583
+
584
+ tm.assert_frame_equal(result, expected)
585
+
586
+
587
+ @pytest.mark.parametrize("use_nan", [True, False])
588
+ def test_rank_pct_equal_values_on_group_transition(use_nan):
589
+ # GH#40518
590
+ fill_value = np.nan if use_nan else 3
591
+ df = DataFrame(
592
+ [
593
+ [-1, 1],
594
+ [-1, 2],
595
+ [1, fill_value],
596
+ [-1, fill_value],
597
+ ],
598
+ columns=["group", "val"],
599
+ )
600
+ result = df.groupby(["group"])["val"].rank(
601
+ method="dense",
602
+ pct=True,
603
+ )
604
+ if use_nan:
605
+ expected = Series([0.5, 1, np.nan, np.nan], name="val")
606
+ else:
607
+ expected = Series([1 / 3, 2 / 3, 1, 1], name="val")
608
+
609
+ tm.assert_series_equal(result, expected)
610
+
611
+
612
+ def test_rank_multiindex():
613
+ # GH27721
614
+ df = concat(
615
+ {
616
+ "a": DataFrame({"col1": [3, 4], "col2": [1, 2]}),
617
+ "b": DataFrame({"col3": [5, 6], "col4": [7, 8]}),
618
+ },
619
+ axis=1,
620
+ )
621
+
622
+ gb = df.groupby(level=0, axis=1)
623
+ result = gb.rank(axis=1)
624
+
625
+ expected = concat(
626
+ [
627
+ df["a"].rank(axis=1),
628
+ df["b"].rank(axis=1),
629
+ ],
630
+ axis=1,
631
+ keys=["a", "b"],
632
+ )
633
+ tm.assert_frame_equal(result, expected)
634
+
635
+
636
+ def test_groupby_axis0_rank_axis1():
637
+ # GH#41320
638
+ df = DataFrame(
639
+ {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]},
640
+ index=["a", "a", "b", "b"],
641
+ )
642
+ gb = df.groupby(level=0, axis=0)
643
+
644
+ res = gb.rank(axis=1)
645
+
646
+ # This should match what we get when "manually" operating group-by-group
647
+ expected = concat([df.loc["a"].rank(axis=1), df.loc["b"].rank(axis=1)], axis=0)
648
+ tm.assert_frame_equal(res, expected)
649
+
650
+ # check that we haven't accidentally written a case that coincidentally
651
+ # matches rank(axis=0)
652
+ alt = gb.rank(axis=0)
653
+ assert not alt.equals(expected)
654
+
655
+
656
+ def test_groupby_axis0_cummax_axis1():
657
+ # case where groupby axis is 0 and axis keyword in transform is 1
658
+
659
+ # df has mixed dtype -> multiple blocks
660
+ df = DataFrame(
661
+ {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]},
662
+ index=["a", "a", "b", "b"],
663
+ )
664
+ gb = df.groupby(level=0, axis=0)
665
+
666
+ cmax = gb.cummax(axis=1)
667
+ expected = df[[0, 1]].astype(np.float64)
668
+ expected[2] = expected[1]
669
+ tm.assert_frame_equal(cmax, expected)
670
+
671
+
672
+ def test_non_unique_index():
673
+ # GH 16577
674
+ df = DataFrame(
675
+ {"A": [1.0, 2.0, 3.0, np.nan], "value": 1.0},
676
+ index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
677
+ )
678
+ result = df.groupby([df.index, "A"]).value.rank(ascending=True, pct=True)
679
+ expected = Series(
680
+ [1.0, 1.0, 1.0, np.nan],
681
+ index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
682
+ name="value",
683
+ )
684
+ tm.assert_series_equal(result, expected)
685
+
686
+
687
+ def test_rank_categorical():
688
+ cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True)
689
+ cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True)
690
+
691
+ df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2})
692
+
693
+ gb = df.groupby("col1")
694
+
695
+ res = gb.rank()
696
+
697
+ expected = df.astype(object).groupby("col1").rank()
698
+ tm.assert_frame_equal(res, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/groupby/test_sample.py ADDED
@@ -0,0 +1,154 @@
1
+ import pytest
2
+
3
+ from pandas import (
4
+ DataFrame,
5
+ Index,
6
+ Series,
7
+ )
8
+ import pandas._testing as tm
9
+
10
+
11
+ @pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
12
+ def test_groupby_sample_balanced_groups_shape(n, frac):
13
+ values = [1] * 10 + [2] * 10
14
+ df = DataFrame({"a": values, "b": values})
15
+
16
+ result = df.groupby("a").sample(n=n, frac=frac)
17
+ values = [1] * 2 + [2] * 2
18
+ expected = DataFrame({"a": values, "b": values}, index=result.index)
19
+ tm.assert_frame_equal(result, expected)
20
+
21
+ result = df.groupby("a")["b"].sample(n=n, frac=frac)
22
+ expected = Series(values, name="b", index=result.index)
23
+ tm.assert_series_equal(result, expected)
24
+
25
+
26
+ def test_groupby_sample_unbalanced_groups_shape():
27
+ values = [1] * 10 + [2] * 20
28
+ df = DataFrame({"a": values, "b": values})
29
+
30
+ result = df.groupby("a").sample(n=5)
31
+ values = [1] * 5 + [2] * 5
32
+ expected = DataFrame({"a": values, "b": values}, index=result.index)
33
+ tm.assert_frame_equal(result, expected)
34
+
35
+ result = df.groupby("a")["b"].sample(n=5)
36
+ expected = Series(values, name="b", index=result.index)
37
+ tm.assert_series_equal(result, expected)
38
+
39
+
40
+ def test_groupby_sample_index_value_spans_groups():
41
+ values = [1] * 3 + [2] * 3
42
+ df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
43
+
44
+ result = df.groupby("a").sample(n=2)
45
+ values = [1] * 2 + [2] * 2
46
+ expected = DataFrame({"a": values, "b": values}, index=result.index)
47
+ tm.assert_frame_equal(result, expected)
48
+
49
+ result = df.groupby("a")["b"].sample(n=2)
50
+ expected = Series(values, name="b", index=result.index)
51
+ tm.assert_series_equal(result, expected)
52
+
53
+
54
+ def test_groupby_sample_n_and_frac_raises():
55
+ df = DataFrame({"a": [1, 2], "b": [1, 2]})
56
+ msg = "Please enter a value for `frac` OR `n`, not both"
57
+
58
+ with pytest.raises(ValueError, match=msg):
59
+ df.groupby("a").sample(n=1, frac=1.0)
60
+
61
+ with pytest.raises(ValueError, match=msg):
62
+ df.groupby("a")["b"].sample(n=1, frac=1.0)
63
+
64
+
65
+ def test_groupby_sample_frac_gt_one_without_replacement_raises():
66
+ df = DataFrame({"a": [1, 2], "b": [1, 2]})
67
+ msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."
68
+
69
+ with pytest.raises(ValueError, match=msg):
70
+ df.groupby("a").sample(frac=1.5, replace=False)
71
+
72
+ with pytest.raises(ValueError, match=msg):
73
+ df.groupby("a")["b"].sample(frac=1.5, replace=False)
74
+
75
+
76
+ @pytest.mark.parametrize("n", [-1, 1.5])
77
+ def test_groupby_sample_invalid_n_raises(n):
78
+ df = DataFrame({"a": [1, 2], "b": [1, 2]})
79
+
80
+ if n < 0:
81
+ msg = "A negative number of rows requested. Please provide `n` >= 0."
82
+ else:
83
+ msg = "Only integers accepted as `n` values"
84
+
85
+ with pytest.raises(ValueError, match=msg):
86
+ df.groupby("a").sample(n=n)
87
+
88
+ with pytest.raises(ValueError, match=msg):
89
+ df.groupby("a")["b"].sample(n=n)
90
+
91
+
92
+ def test_groupby_sample_oversample():
93
+ values = [1] * 10 + [2] * 10
94
+ df = DataFrame({"a": values, "b": values})
95
+
96
+ result = df.groupby("a").sample(frac=2.0, replace=True)
97
+ values = [1] * 20 + [2] * 20
98
+ expected = DataFrame({"a": values, "b": values}, index=result.index)
99
+ tm.assert_frame_equal(result, expected)
100
+
101
+ result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
102
+ expected = Series(values, name="b", index=result.index)
103
+ tm.assert_series_equal(result, expected)
104
+
105
+
106
+ def test_groupby_sample_without_n_or_frac():
107
+ values = [1] * 10 + [2] * 10
108
+ df = DataFrame({"a": values, "b": values})
109
+
110
+ result = df.groupby("a").sample(n=None, frac=None)
111
+ expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
112
+ tm.assert_frame_equal(result, expected)
113
+
114
+ result = df.groupby("a")["b"].sample(n=None, frac=None)
115
+ expected = Series([1, 2], name="b", index=result.index)
116
+ tm.assert_series_equal(result, expected)
117
+
118
+
119
+ @pytest.mark.parametrize(
120
+ "index, expected_index",
121
+ [(["w", "x", "y", "z"], ["w", "w", "y", "y"]), ([3, 4, 5, 6], [3, 3, 5, 5])],
122
+ )
123
+ def test_groupby_sample_with_weights(index, expected_index):
124
+ # GH 39927 - tests for integer index needed
125
+ values = [1] * 2 + [2] * 2
126
+ df = DataFrame({"a": values, "b": values}, index=Index(index))
127
+
128
+ result = df.groupby("a").sample(n=2, replace=True, weights=[1, 0, 1, 0])
129
+ expected = DataFrame({"a": values, "b": values}, index=Index(expected_index))
130
+ tm.assert_frame_equal(result, expected)
131
+
132
+ result = df.groupby("a")["b"].sample(n=2, replace=True, weights=[1, 0, 1, 0])
133
+ expected = Series(values, name="b", index=Index(expected_index))
134
+ tm.assert_series_equal(result, expected)
135
+
136
+
137
+ def test_groupby_sample_with_selections():
138
+ # GH 39928
139
+ values = [1] * 10 + [2] * 10
140
+ df = DataFrame({"a": values, "b": values, "c": values})
141
+
142
+ result = df.groupby("a")[["b", "c"]].sample(n=None, frac=None)
143
+ expected = DataFrame({"b": [1, 2], "c": [1, 2]}, index=result.index)
144
+ tm.assert_frame_equal(result, expected)
145
+
146
+
147
+ def test_groupby_sample_with_empty_inputs():
148
+ # GH48459
149
+ df = DataFrame({"a": [], "b": []})
150
+ groupby_df = df.groupby("a")
151
+
152
+ result = groupby_df.sample()
153
+ expected = df
154
+ tm.assert_frame_equal(result, expected)