Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_apply_mutate.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py +1672 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py +435 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py +392 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py +675 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_corrwith.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_describe.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_groupby_shift_diff.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_is_monotonic.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nlargest_nsmallest.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nth.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_quantile.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_rank.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_sample.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_size.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_skew.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_value_counts.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_corrwith.py +24 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_describe.py +297 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_groupby_shift_diff.py +255 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_is_monotonic.py +78 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_nlargest_nsmallest.py +115 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_nth.py +921 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_quantile.py +496 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_rank.py +721 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_sample.py +154 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_size.py +130 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_skew.py +27 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_value_counts.py +1241 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/test_index_as_string.py +85 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/groupby/test_raises.py +716 -0
- parrot/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/tensor_utils.py +144 -0
- parrot/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py +143 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/__init__.py +79 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py +258 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py +223 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py +249 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py +69 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py +1744 -0
- parrot/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py +1673 -0
- parrot/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc +0 -0
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/__pycache__/test_apply_mutate.cpython-310.pyc
ADDED
|
Binary file (5.08 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (186 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc
ADDED
|
Binary file (52.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc
ADDED
|
Binary file (20 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py
ADDED
|
@@ -0,0 +1,1672 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
test .agg behavior / note that .apply is tested generally in test_groupby.py
|
| 3 |
+
"""
|
| 4 |
+
import datetime
|
| 5 |
+
import functools
|
| 6 |
+
from functools import partial
|
| 7 |
+
import re
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import pytest
|
| 11 |
+
|
| 12 |
+
from pandas.errors import SpecificationError
|
| 13 |
+
|
| 14 |
+
from pandas.core.dtypes.common import is_integer_dtype
|
| 15 |
+
|
| 16 |
+
import pandas as pd
|
| 17 |
+
from pandas import (
|
| 18 |
+
DataFrame,
|
| 19 |
+
Index,
|
| 20 |
+
MultiIndex,
|
| 21 |
+
Series,
|
| 22 |
+
concat,
|
| 23 |
+
to_datetime,
|
| 24 |
+
)
|
| 25 |
+
import pandas._testing as tm
|
| 26 |
+
from pandas.core.groupby.grouper import Grouping
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def test_groupby_agg_no_extra_calls():
|
| 30 |
+
# GH#31760
|
| 31 |
+
df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
|
| 32 |
+
gb = df.groupby("key")["value"]
|
| 33 |
+
|
| 34 |
+
def dummy_func(x):
|
| 35 |
+
assert len(x) != 0
|
| 36 |
+
return x.sum()
|
| 37 |
+
|
| 38 |
+
gb.agg(dummy_func)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_agg_regression1(tsframe):
|
| 42 |
+
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
|
| 43 |
+
result = grouped.agg("mean")
|
| 44 |
+
expected = grouped.mean()
|
| 45 |
+
tm.assert_frame_equal(result, expected)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_agg_must_agg(df):
|
| 49 |
+
grouped = df.groupby("A")["C"]
|
| 50 |
+
|
| 51 |
+
msg = "Must produce aggregated value"
|
| 52 |
+
with pytest.raises(Exception, match=msg):
|
| 53 |
+
grouped.agg(lambda x: x.describe())
|
| 54 |
+
with pytest.raises(Exception, match=msg):
|
| 55 |
+
grouped.agg(lambda x: x.index[:2])
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def test_agg_ser_multi_key(df):
|
| 59 |
+
f = lambda x: x.sum()
|
| 60 |
+
results = df.C.groupby([df.A, df.B]).aggregate(f)
|
| 61 |
+
expected = df.groupby(["A", "B"]).sum()["C"]
|
| 62 |
+
tm.assert_series_equal(results, expected)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def test_groupby_aggregation_mixed_dtype():
|
| 66 |
+
# GH 6212
|
| 67 |
+
expected = DataFrame(
|
| 68 |
+
{
|
| 69 |
+
"v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
|
| 70 |
+
"v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
|
| 71 |
+
},
|
| 72 |
+
index=MultiIndex.from_tuples(
|
| 73 |
+
[
|
| 74 |
+
(1, 95),
|
| 75 |
+
(1, 99),
|
| 76 |
+
(2, 95),
|
| 77 |
+
(2, 99),
|
| 78 |
+
("big", "damp"),
|
| 79 |
+
("blue", "dry"),
|
| 80 |
+
("red", "red"),
|
| 81 |
+
("red", "wet"),
|
| 82 |
+
],
|
| 83 |
+
names=["by1", "by2"],
|
| 84 |
+
),
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
df = DataFrame(
|
| 88 |
+
{
|
| 89 |
+
"v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
|
| 90 |
+
"v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
|
| 91 |
+
"by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
|
| 92 |
+
"by2": [
|
| 93 |
+
"wet",
|
| 94 |
+
"dry",
|
| 95 |
+
99,
|
| 96 |
+
95,
|
| 97 |
+
np.nan,
|
| 98 |
+
"damp",
|
| 99 |
+
95,
|
| 100 |
+
99,
|
| 101 |
+
"red",
|
| 102 |
+
99,
|
| 103 |
+
np.nan,
|
| 104 |
+
np.nan,
|
| 105 |
+
],
|
| 106 |
+
}
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
g = df.groupby(["by1", "by2"])
|
| 110 |
+
result = g[["v1", "v2"]].mean()
|
| 111 |
+
tm.assert_frame_equal(result, expected)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def test_groupby_aggregation_multi_level_column():
|
| 115 |
+
# GH 29772
|
| 116 |
+
lst = [
|
| 117 |
+
[True, True, True, False],
|
| 118 |
+
[True, False, np.nan, False],
|
| 119 |
+
[True, True, np.nan, False],
|
| 120 |
+
[True, True, np.nan, False],
|
| 121 |
+
]
|
| 122 |
+
df = DataFrame(
|
| 123 |
+
data=lst,
|
| 124 |
+
columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 128 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 129 |
+
gb = df.groupby(level=1, axis=1)
|
| 130 |
+
result = gb.sum(numeric_only=False)
|
| 131 |
+
expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]})
|
| 132 |
+
|
| 133 |
+
tm.assert_frame_equal(result, expected)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def test_agg_apply_corner(ts, tsframe):
|
| 137 |
+
# nothing to group, all NA
|
| 138 |
+
grouped = ts.groupby(ts * np.nan, group_keys=False)
|
| 139 |
+
assert ts.dtype == np.float64
|
| 140 |
+
|
| 141 |
+
# groupby float64 values results in a float64 Index
|
| 142 |
+
exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
|
| 143 |
+
tm.assert_series_equal(grouped.sum(), exp)
|
| 144 |
+
tm.assert_series_equal(grouped.agg("sum"), exp)
|
| 145 |
+
tm.assert_series_equal(grouped.apply("sum"), exp, check_index_type=False)
|
| 146 |
+
|
| 147 |
+
# DataFrame
|
| 148 |
+
grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False)
|
| 149 |
+
exp_df = DataFrame(
|
| 150 |
+
columns=tsframe.columns,
|
| 151 |
+
dtype=float,
|
| 152 |
+
index=Index([], name="A", dtype=np.float64),
|
| 153 |
+
)
|
| 154 |
+
tm.assert_frame_equal(grouped.sum(), exp_df)
|
| 155 |
+
tm.assert_frame_equal(grouped.agg("sum"), exp_df)
|
| 156 |
+
|
| 157 |
+
msg = "The behavior of DataFrame.sum with axis=None is deprecated"
|
| 158 |
+
with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
|
| 159 |
+
res = grouped.apply(np.sum)
|
| 160 |
+
tm.assert_frame_equal(res, exp_df)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def test_agg_grouping_is_list_tuple(ts):
|
| 164 |
+
df = DataFrame(
|
| 165 |
+
np.random.default_rng(2).standard_normal((30, 4)),
|
| 166 |
+
columns=Index(list("ABCD"), dtype=object),
|
| 167 |
+
index=pd.date_range("2000-01-01", periods=30, freq="B"),
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
grouped = df.groupby(lambda x: x.year)
|
| 171 |
+
grouper = grouped._grouper.groupings[0].grouping_vector
|
| 172 |
+
grouped._grouper.groupings[0] = Grouping(ts.index, list(grouper))
|
| 173 |
+
|
| 174 |
+
result = grouped.agg("mean")
|
| 175 |
+
expected = grouped.mean()
|
| 176 |
+
tm.assert_frame_equal(result, expected)
|
| 177 |
+
|
| 178 |
+
grouped._grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
|
| 179 |
+
|
| 180 |
+
result = grouped.agg("mean")
|
| 181 |
+
expected = grouped.mean()
|
| 182 |
+
tm.assert_frame_equal(result, expected)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def test_agg_python_multiindex(multiindex_dataframe_random_data):
|
| 186 |
+
grouped = multiindex_dataframe_random_data.groupby(["A", "B"])
|
| 187 |
+
|
| 188 |
+
result = grouped.agg("mean")
|
| 189 |
+
expected = grouped.mean()
|
| 190 |
+
tm.assert_frame_equal(result, expected)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@pytest.mark.parametrize(
|
| 194 |
+
"groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
|
| 195 |
+
)
|
| 196 |
+
def test_aggregate_str_func(tsframe, groupbyfunc):
|
| 197 |
+
grouped = tsframe.groupby(groupbyfunc)
|
| 198 |
+
|
| 199 |
+
# single series
|
| 200 |
+
result = grouped["A"].agg("std")
|
| 201 |
+
expected = grouped["A"].std()
|
| 202 |
+
tm.assert_series_equal(result, expected)
|
| 203 |
+
|
| 204 |
+
# group frame by function name
|
| 205 |
+
result = grouped.aggregate("var")
|
| 206 |
+
expected = grouped.var()
|
| 207 |
+
tm.assert_frame_equal(result, expected)
|
| 208 |
+
|
| 209 |
+
# group frame by function dict
|
| 210 |
+
result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
|
| 211 |
+
expected = DataFrame(
|
| 212 |
+
{
|
| 213 |
+
"A": grouped["A"].var(),
|
| 214 |
+
"B": grouped["B"].std(),
|
| 215 |
+
"C": grouped["C"].mean(),
|
| 216 |
+
"D": grouped["D"].sem(),
|
| 217 |
+
}
|
| 218 |
+
)
|
| 219 |
+
tm.assert_frame_equal(result, expected)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def test_std_masked_dtype(any_numeric_ea_dtype):
|
| 223 |
+
# GH#35516
|
| 224 |
+
df = DataFrame(
|
| 225 |
+
{
|
| 226 |
+
"a": [2, 1, 1, 1, 2, 2, 1],
|
| 227 |
+
"b": Series([pd.NA, 1, 2, 1, 1, 1, 2], dtype="Float64"),
|
| 228 |
+
}
|
| 229 |
+
)
|
| 230 |
+
result = df.groupby("a").std()
|
| 231 |
+
expected = DataFrame(
|
| 232 |
+
{"b": [0.57735, 0]}, index=Index([1, 2], name="a"), dtype="Float64"
|
| 233 |
+
)
|
| 234 |
+
tm.assert_frame_equal(result, expected)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def test_agg_str_with_kwarg_axis_1_raises(df, reduction_func):
|
| 238 |
+
gb = df.groupby(level=0)
|
| 239 |
+
warn_msg = f"DataFrameGroupBy.{reduction_func} with axis=1 is deprecated"
|
| 240 |
+
if reduction_func in ("idxmax", "idxmin"):
|
| 241 |
+
error = TypeError
|
| 242 |
+
msg = "'[<>]' not supported between instances of 'float' and 'str'"
|
| 243 |
+
warn = FutureWarning
|
| 244 |
+
else:
|
| 245 |
+
error = ValueError
|
| 246 |
+
msg = f"Operation {reduction_func} does not support axis=1"
|
| 247 |
+
warn = None
|
| 248 |
+
with pytest.raises(error, match=msg):
|
| 249 |
+
with tm.assert_produces_warning(warn, match=warn_msg):
|
| 250 |
+
gb.agg(reduction_func, axis=1)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
@pytest.mark.parametrize(
|
| 254 |
+
"func, expected, dtype, result_dtype_dict",
|
| 255 |
+
[
|
| 256 |
+
("sum", [5, 7, 9], "int64", {}),
|
| 257 |
+
("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}),
|
| 258 |
+
("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}),
|
| 259 |
+
("sum", [5, 7, 9], "Int64", {"j": "int64"}),
|
| 260 |
+
("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),
|
| 261 |
+
("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}),
|
| 262 |
+
],
|
| 263 |
+
)
|
| 264 |
+
def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict):
|
| 265 |
+
# GH#43209
|
| 266 |
+
df = DataFrame(
|
| 267 |
+
[[1, 2, 3, 4, 5, 6]] * 3,
|
| 268 |
+
columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]),
|
| 269 |
+
).astype({("a", "j"): dtype, ("b", "j"): dtype})
|
| 270 |
+
|
| 271 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 272 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 273 |
+
gb = df.groupby(level=1, axis=1)
|
| 274 |
+
result = gb.agg(func)
|
| 275 |
+
expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(
|
| 276 |
+
result_dtype_dict
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
tm.assert_frame_equal(result, expected)
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@pytest.mark.parametrize(
|
| 283 |
+
"func, expected_data, result_dtype_dict",
|
| 284 |
+
[
|
| 285 |
+
("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}),
|
| 286 |
+
# std should ideally return Int64 / Float64 #43330
|
| 287 |
+
("std", [[2**0.5] * 2] * 3, "float64"),
|
| 288 |
+
("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}),
|
| 289 |
+
],
|
| 290 |
+
)
|
| 291 |
+
def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):
|
| 292 |
+
# GH#43209
|
| 293 |
+
df = DataFrame(
|
| 294 |
+
np.arange(12).reshape(3, 4),
|
| 295 |
+
index=Index([0, 1, 0], name="y"),
|
| 296 |
+
columns=Index([10, 20, 10, 20], name="x"),
|
| 297 |
+
dtype="int64",
|
| 298 |
+
).astype({10: "Int64"})
|
| 299 |
+
|
| 300 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 301 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 302 |
+
gb = df.groupby("x", axis=1)
|
| 303 |
+
result = gb.agg(func)
|
| 304 |
+
expected = DataFrame(
|
| 305 |
+
data=expected_data,
|
| 306 |
+
index=Index([0, 1, 0], name="y"),
|
| 307 |
+
columns=Index([10, 20], name="x"),
|
| 308 |
+
).astype(result_dtype_dict)
|
| 309 |
+
tm.assert_frame_equal(result, expected)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def test_aggregate_item_by_item(df):
|
| 313 |
+
grouped = df.groupby("A")
|
| 314 |
+
|
| 315 |
+
aggfun_0 = lambda ser: ser.size
|
| 316 |
+
result = grouped.agg(aggfun_0)
|
| 317 |
+
foosum = (df.A == "foo").sum()
|
| 318 |
+
barsum = (df.A == "bar").sum()
|
| 319 |
+
K = len(result.columns)
|
| 320 |
+
|
| 321 |
+
# GH5782
|
| 322 |
+
exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo")
|
| 323 |
+
tm.assert_series_equal(result.xs("foo"), exp)
|
| 324 |
+
|
| 325 |
+
exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar")
|
| 326 |
+
tm.assert_almost_equal(result.xs("bar"), exp)
|
| 327 |
+
|
| 328 |
+
def aggfun_1(ser):
|
| 329 |
+
return ser.size
|
| 330 |
+
|
| 331 |
+
result = DataFrame().groupby(df.A).agg(aggfun_1)
|
| 332 |
+
assert isinstance(result, DataFrame)
|
| 333 |
+
assert len(result) == 0
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def test_wrap_agg_out(three_group):
|
| 337 |
+
grouped = three_group.groupby(["A", "B"])
|
| 338 |
+
|
| 339 |
+
def func(ser):
|
| 340 |
+
if ser.dtype == object:
|
| 341 |
+
raise TypeError("Test error message")
|
| 342 |
+
return ser.sum()
|
| 343 |
+
|
| 344 |
+
with pytest.raises(TypeError, match="Test error message"):
|
| 345 |
+
grouped.aggregate(func)
|
| 346 |
+
result = grouped[["D", "E", "F"]].aggregate(func)
|
| 347 |
+
exp_grouped = three_group.loc[:, ["A", "B", "D", "E", "F"]]
|
| 348 |
+
expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
|
| 349 |
+
tm.assert_frame_equal(result, expected)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def test_agg_multiple_functions_maintain_order(df):
|
| 353 |
+
# GH #610
|
| 354 |
+
funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
|
| 355 |
+
msg = "is currently using SeriesGroupBy.mean"
|
| 356 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 357 |
+
result = df.groupby("A")["C"].agg(funcs)
|
| 358 |
+
exp_cols = Index(["mean", "max", "min"])
|
| 359 |
+
|
| 360 |
+
tm.assert_index_equal(result.columns, exp_cols)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def test_series_index_name(df):
|
| 364 |
+
grouped = df.loc[:, ["C"]].groupby(df["A"])
|
| 365 |
+
result = grouped.agg(lambda x: x.mean())
|
| 366 |
+
assert result.index.name == "A"
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def test_agg_multiple_functions_same_name():
|
| 370 |
+
# GH 30880
|
| 371 |
+
df = DataFrame(
|
| 372 |
+
np.random.default_rng(2).standard_normal((1000, 3)),
|
| 373 |
+
index=pd.date_range("1/1/2012", freq="s", periods=1000),
|
| 374 |
+
columns=["A", "B", "C"],
|
| 375 |
+
)
|
| 376 |
+
result = df.resample("3min").agg(
|
| 377 |
+
{"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
|
| 378 |
+
)
|
| 379 |
+
expected_index = pd.date_range("1/1/2012", freq="3min", periods=6)
|
| 380 |
+
expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
|
| 381 |
+
expected_values = np.array(
|
| 382 |
+
[df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]
|
| 383 |
+
).T
|
| 384 |
+
expected = DataFrame(
|
| 385 |
+
expected_values, columns=expected_columns, index=expected_index
|
| 386 |
+
)
|
| 387 |
+
tm.assert_frame_equal(result, expected)
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def test_agg_multiple_functions_same_name_with_ohlc_present():
    """Same-named partials coexist with ohlc, which expands to four sub-columns."""
    # GH 30880
    # ohlc expands dimensions, so different test to the above is required.
    df = DataFrame(
        np.random.default_rng(2).standard_normal((1000, 3)),
        index=pd.date_range("1/1/2012", freq="s", periods=1000, name="dti"),
        columns=Index(["A", "B", "C"], name="alpha"),
    )
    result = df.resample("3min").agg(
        {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
    )
    expected_index = pd.date_range("1/1/2012", freq="3min", periods=6, name="dti")
    # ohlc contributes a third column level; the quantile partials keep the
    # duplicated ("A", "quantile") labels with the column name in level 3
    expected_columns = MultiIndex.from_tuples(
        [
            ("A", "ohlc", "open"),
            ("A", "ohlc", "high"),
            ("A", "ohlc", "low"),
            ("A", "ohlc", "close"),
            ("A", "quantile", "A"),
            ("A", "quantile", "A"),
        ],
        names=["alpha", None, None],
    )
    non_ohlc_expected_values = np.array(
        [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]
    ).T
    expected_values = np.hstack(
        [df.resample("3min").A.ohlc(), non_ohlc_expected_values]
    )
    expected = DataFrame(
        expected_values, columns=expected_columns, index=expected_index
    )
    tm.assert_frame_equal(result, expected)
def test_multiple_functions_tuples_and_non_tuples(df):
|
| 426 |
+
# #1359
|
| 427 |
+
# Columns B and C would cause partial failure
|
| 428 |
+
df = df.drop(columns=["B", "C"])
|
| 429 |
+
|
| 430 |
+
funcs = [("foo", "mean"), "std"]
|
| 431 |
+
ex_funcs = [("foo", "mean"), ("std", "std")]
|
| 432 |
+
|
| 433 |
+
result = df.groupby("A")["D"].agg(funcs)
|
| 434 |
+
expected = df.groupby("A")["D"].agg(ex_funcs)
|
| 435 |
+
tm.assert_frame_equal(result, expected)
|
| 436 |
+
|
| 437 |
+
result = df.groupby("A").agg(funcs)
|
| 438 |
+
expected = df.groupby("A").agg(ex_funcs)
|
| 439 |
+
tm.assert_frame_equal(result, expected)
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def test_more_flexible_frame_multi_function(df):
    """dict-of-lists agg matches the manual concat of per-function aggregations."""
    grouped = df.groupby("A")

    exmean = grouped.agg({"C": "mean", "D": "mean"})
    exstd = grouped.agg({"C": "std", "D": "std"})

    expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
    expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)

    d = {"C": ["mean", "std"], "D": ["mean", "std"]}
    result = grouped.aggregate(d)

    tm.assert_frame_equal(result, expected)

    # be careful
    # NOTE(review): result and expected are the exact same call here, so this
    # only asserts the operation runs and is deterministic — confirm intent
    result = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})
    expected = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})
    tm.assert_frame_equal(result, expected)

    def numpymean(x):
        return np.mean(x)

    def numpystd(x):
        return np.std(x, ddof=1)

    # this uses column selection & renaming
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}}
        grouped.aggregate(d)

    # But without renaming, these functions are OK
    d = {"C": ["mean"], "D": [numpymean, numpystd]}
    grouped.aggregate(d)
def test_multi_function_flexible_mix(df):
    """Nested-renamer specs always raise SpecificationError (GH #1268).

    Mixing a nested renamer for one column with a plain function (or a
    one-element list of functions) for another column must still raise.
    """
    grouped = df.groupby("A")

    # Expected
    d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
    # this uses column selection & renaming
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate(d)

    # Test 1: nested renamer plus a plain function for the other column
    d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
    # this uses column selection & renaming
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate(d)

    # Test 2: nested renamer plus a list-wrapped function — previously this
    # duplicated Test 1 verbatim and exercised nothing new
    d = {"C": {"foo": "mean", "bar": "std"}, "D": ["sum"]}
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate(d)
def test_groupby_agg_coercing_bools():
    """Boolean-returning lambdas in agg keep bool dtype (issue 14873)."""
    dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
    gp = dat.groupby("a")
    index = Index([1, 2], name="a")

    all_nonzero = gp["b"].aggregate(lambda s: (s != 0).all())
    tm.assert_series_equal(all_nonzero, Series([False, True], index=index, name="b"))

    all_null = gp["c"].aggregate(lambda s: s.isnull().all())
    tm.assert_series_equal(all_null, Series([True, False], index=index, name="c"))
def test_groupby_agg_dict_with_getitem():
    """dict agg after a [[col]] selection yields just that aggregated column (issue 25471)."""
    data = DataFrame({"A": ["A", "A", "B", "B", "B"], "B": [1, 2, 1, 1, 2]})
    result = data.groupby("A")[["B"]].agg({"B": "sum"})

    expected = DataFrame({"B": [3, 4]}, index=Index(["A", "B"], name="A"))
    tm.assert_frame_equal(result, expected)
def test_groupby_agg_dict_dup_columns():
    """Dict-keyed agg works on a frame with duplicate column labels (GH#55006)."""
    frame = DataFrame(
        [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]],
        columns=["a", "b", "c", "c"],
    )
    grouped = frame.groupby("a")
    tm.assert_frame_equal(
        grouped.agg({"b": "sum"}),
        DataFrame({"b": [5, 4]}, index=Index([1, 2], name="a")),
    )
@pytest.mark.parametrize(
    "op",
    [
        lambda x: x.sum(),
        lambda x: x.cumsum(),
        lambda x: x.transform("sum"),
        lambda x: x.transform("cumsum"),
        lambda x: x.agg("sum"),
        lambda x: x.agg("cumsum"),
    ],
)
def test_bool_agg_dtype(op):
    """sum/cumsum of a bool column coerces to integer, however the op is spelled."""
    # GH 7001
    # Bool sum aggregations result in int
    df = DataFrame({"a": [1, 1], "b": [False, True]})
    s = df.set_index("a")["b"]

    # DataFrameGroupBy path
    result = op(df.groupby("a"))["b"].dtype
    assert is_integer_dtype(result)

    # SeriesGroupBy path
    result = op(s.groupby("a")).dtype
    assert is_integer_dtype(result)
@pytest.mark.parametrize(
    "keys, agg_index",
    [
        (["a"], Index([1], name="a")),
        (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
    ],
)
@pytest.mark.parametrize(
    "input_dtype", ["bool", "int32", "int64", "float32", "float64"]
)
@pytest.mark.parametrize(
    "result_dtype", ["bool", "int32", "int64", "float32", "float64"]
)
@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
def test_callable_result_dtype_frame(
    keys, agg_index, input_dtype, result_dtype, method
):
    """A UDF's output dtype is preserved across apply/aggregate/transform on a frame."""
    # GH 21240
    df = DataFrame({"a": [1], "b": [2], "c": [True]})
    df["c"] = df["c"].astype(input_dtype)
    op = getattr(df.groupby(keys)[["c"]], method)
    result = op(lambda x: x.astype(result_dtype).iloc[0])
    # transform keeps the original (positional) index; reductions use the group index
    expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
    expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype(
        result_dtype
    )
    if method == "apply":
        # apply labels the column axis with the integer 0
        expected.columns.names = [0]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "keys, agg_index",
    [
        (["a"], Index([1], name="a")),
        (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
    ],
)
@pytest.mark.parametrize("input", [True, 1, 1.0])
@pytest.mark.parametrize("dtype", [bool, int, float])
@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
def test_callable_result_dtype_series(keys, agg_index, input, dtype, method):
    """A UDF's output dtype is preserved across apply/aggregate/transform on a series."""
    # GH 21240
    # NOTE: the parametrized name `input` shadows the builtin; kept for fixture
    # compatibility
    df = DataFrame({"a": [1], "b": [2], "c": [input]})
    op = getattr(df.groupby(keys)["c"], method)
    result = op(lambda x: x.astype(dtype).iloc[0])
    # transform keeps the original (positional) index; reductions use the group index
    expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
    expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype)
    tm.assert_series_equal(result, expected)
def test_order_aggregate_multiple_funcs():
    """The second column level of a list-agg preserves the order funcs were given in."""
    # GH 25692
    frame = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})

    func_names = ["sum", "max", "mean", "ohlc", "min"]
    aggregated = frame.groupby("A").agg(func_names)

    tm.assert_index_equal(aggregated.columns.levels[1], Index(func_names))
def test_ohlc_ea_dtypes(any_numeric_ea_dtype):
    """ohlc preserves extension-array dtypes, with and without as_index."""
    # GH#37493
    df = DataFrame(
        {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]},
        dtype=any_numeric_ea_dtype,
    )
    gb = df.groupby("a")
    result = gb.ohlc()
    # group 2 is all-NA; singleton groups repeat their value in all four slots
    expected = DataFrame(
        [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4],
        columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]),
        index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"),
        dtype=any_numeric_ea_dtype,
    )
    tm.assert_frame_equal(result, expected)

    # as_index=False moves the group keys into a regular column
    gb2 = df.groupby("a", as_index=False)
    result2 = gb2.ohlc()
    expected2 = expected.reset_index()
    tm.assert_frame_equal(result2, expected2)
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
def test_uint64_type_handling(dtype, how):
    """Large int64/uint64 values survive groupby reductions without precision loss."""
    # GH 26310
    df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})
    expected = df.groupby("y").agg({"x": how})
    df.x = df.x.astype(dtype)
    result = df.groupby("y").agg({"x": how})
    if how not in ("mean", "median"):
        # mean and median always result in floats
        result.x = result.x.astype(np.int64)
    tm.assert_frame_equal(result, expected, check_exact=True)
def test_func_duplicates_raises():
    """Passing the same function name twice in a list-agg raises SpecificationError."""
    # GH28426
    msg = "Function names"
    df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
    with pytest.raises(SpecificationError, match=msg):
        df.groupby("A").agg(["min", "min"])
@pytest.mark.parametrize(
    "index",
    [
        pd.CategoricalIndex(list("abc")),
        pd.interval_range(0, 3),
        pd.period_range("2020", periods=3, freq="D"),
        MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
    ],
)
def test_agg_index_has_complex_internals(index):
    """agg works regardless of the (extension/multi) index type of the input frame."""
    # GH 31223
    df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
    result = df.groupby("group").agg({"value": Series.nunique})
    expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
    tm.assert_frame_equal(result, expected)
def test_agg_split_block():
    # https://github.com/pandas-dev/pandas/issues/31522
    # min over an all-object frame whose internal blocks get split
    frame = DataFrame(
        {
            "key1": ["a", "a", "b", "b", "a"],
            "key2": ["one", "two", "one", "two", "one"],
            "key3": ["three", "three", "three", "six", "six"],
        }
    )
    expected = DataFrame(
        {"key2": ["one", "one"], "key3": ["six", "six"]},
        index=Index(["a", "b"], name="key1"),
    )
    tm.assert_frame_equal(frame.groupby("key1").min(), expected)
def test_agg_split_object_part_datetime():
    # https://github.com/pandas-dev/pandas/pull/31616
    # object-cast frame mixing datetimes, strings and ints: min keeps object dtype
    frame = DataFrame(
        {
            "A": pd.date_range("2000", periods=4),
            "B": ["a", "b", "c", "d"],
            "C": [1, 2, 3, 4],
            "D": ["b", "c", "d", "e"],
            "E": pd.date_range("2000", periods=4),
            "F": [1, 2, 3, 4],
        }
    ).astype(object)

    single_group_min = frame.groupby([0, 0, 0, 0]).min()

    expected = DataFrame(
        {
            "A": [pd.Timestamp("2000")],
            "B": ["a"],
            "C": [1],
            "D": ["b"],
            "E": [pd.Timestamp("2000")],
            "F": [1],
        },
        index=np.array([0]),
        dtype=object,
    )
    tm.assert_frame_equal(single_group_min, expected)
class TestNamedAggregationSeries:
    """Named aggregation (``agg(name=func)``) on a SeriesGroupBy."""

    def test_series_named_agg(self):
        df = Series([1, 2, 3, 4])
        gr = df.groupby([0, 0, 1, 1])
        result = gr.agg(a="sum", b="min")
        expected = DataFrame(
            {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=np.array([0, 1])
        )
        tm.assert_frame_equal(result, expected)

        # output columns follow keyword order
        result = gr.agg(b="min", a="sum")
        expected = expected[["b", "a"]]
        tm.assert_frame_equal(result, expected)

    def test_no_args_raises(self):
        gr = Series([1, 2]).groupby([0, 1])
        with pytest.raises(TypeError, match="Must provide"):
            gr.agg()

        # but we do allow this
        result = gr.agg([])
        expected = DataFrame(columns=[])
        tm.assert_frame_equal(result, expected)

    def test_series_named_agg_duplicates_no_raises(self):
        # GH28426: the same aggfunc may be reused under different output names
        gr = Series([1, 2, 3]).groupby([0, 0, 1])
        grouped = gr.agg(a="sum", b="sum")
        expected = DataFrame({"a": [3, 3], "b": [3, 3]}, index=np.array([0, 1]))
        tm.assert_frame_equal(expected, grouped)

    def test_mangled(self):
        # distinct lambdas under distinct output names do not collide
        gr = Series([1, 2, 3]).groupby([0, 0, 1])
        result = gr.agg(a=lambda x: 0, b=lambda x: 1)
        expected = DataFrame({"a": [0, 0], "b": [1, 1]}, index=np.array([0, 1]))
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "inp",
        [
            pd.NamedAgg(column="anything", aggfunc="min"),
            ("anything", "min"),
            ["anything", "min"],
        ],
    )
    def test_named_agg_nametuple(self, inp):
        # GH34422: column-selecting specs are invalid for a SeriesGroupBy
        s = Series([1, 1, 2, 2, 3, 3, 4, 5])
        msg = f"func is expected but received {type(inp).__name__}"
        with pytest.raises(TypeError, match=msg):
            s.groupby(s.values).agg(a=inp)
class TestNamedAggregationDataFrame:
    """Named aggregation (``agg(out_name=(column, func))``) on a DataFrameGroupBy."""

    def test_agg_relabel(self):
        df = DataFrame(
            {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
        )
        result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
        expected = DataFrame(
            {"a_max": [1, 3], "b_max": [6, 8]},
            index=Index(["a", "b"], name="group"),
            columns=["a_max", "b_max"],
        )
        tm.assert_frame_equal(result, expected)

        # order invariance: output columns follow keyword order, not column order
        p98 = functools.partial(np.percentile, q=98)
        result = df.groupby("group").agg(
            b_min=("B", "min"),
            a_min=("A", "min"),
            a_mean=("A", "mean"),
            a_max=("A", "max"),
            b_max=("B", "max"),
            a_98=("A", p98),
        )
        expected = DataFrame(
            {
                "b_min": [5, 7],
                "a_min": [0, 2],
                "a_mean": [0.5, 2.5],
                "a_max": [1, 3],
                "b_max": [6, 8],
                "a_98": [0.98, 2.98],
            },
            index=Index(["a", "b"], name="group"),
            columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
        )
        tm.assert_frame_equal(result, expected)

    def test_agg_relabel_non_identifier(self):
        # output names that aren't valid identifiers go through **{...}
        df = DataFrame(
            {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
        )

        result = df.groupby("group").agg(**{"my col": ("A", "max")})
        expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))
        tm.assert_frame_equal(result, expected)

    def test_duplicate_no_raises(self):
        # GH 28426, if use same input function on same column,
        # no error should raise
        df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})

        grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
        expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))
        tm.assert_frame_equal(grouped, expected)

        quant50 = functools.partial(np.percentile, q=50)
        quant70 = functools.partial(np.percentile, q=70)
        quant50.__name__ = "quant50"
        quant70.__name__ = "quant70"

        test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})

        grouped = test.groupby("col1").agg(
            quantile_50=("col2", quant50), quantile_70=("col2", quant70)
        )
        expected = DataFrame(
            {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
            index=Index(["a", "b"], name="col1"),
        )
        tm.assert_frame_equal(grouped, expected)

    def test_agg_relabel_with_level(self):
        # named agg works when grouping by an index level instead of a column
        df = DataFrame(
            {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
            index=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
        )
        result = df.groupby(level=0).agg(
            aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
        )
        expected = DataFrame(
            {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
        )
        tm.assert_frame_equal(result, expected)

    def test_agg_relabel_other_raises(self):
        # anything that is not a (column, aggfunc) pair must raise
        df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
        grouped = df.groupby("A")
        match = "Must provide"
        with pytest.raises(TypeError, match=match):
            grouped.agg(foo=1)

        with pytest.raises(TypeError, match=match):
            grouped.agg()

        with pytest.raises(TypeError, match=match):
            grouped.agg(a=("B", "max"), b=(1, 2, 3))

    def test_missing_raises(self):
        # referencing a column that does not exist raises KeyError
        df = DataFrame({"A": [0, 1], "B": [1, 2]})
        match = re.escape("Column(s) ['C'] do not exist")
        with pytest.raises(KeyError, match=match):
            df.groupby("A").agg(c=("C", "sum"))

    def test_agg_namedtuple(self):
        # pd.NamedAgg is equivalent to a plain (column, aggfunc) tuple
        df = DataFrame({"A": [0, 1], "B": [1, 2]})
        result = df.groupby("A").agg(
            b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
        )
        expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
        tm.assert_frame_equal(result, expected)

    def test_mangled(self):
        # distinct lambdas on distinct columns do not collide
        df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
        result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
        expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
    [
        (
            (("y", "A"), "max"),
            (("y", "A"), np.mean),
            (("y", "B"), "mean"),
            [1, 3],
            [0.5, 2.5],
            [5.5, 7.5],
        ),
        (
            (("y", "A"), lambda x: max(x)),
            (("y", "A"), lambda x: 1),
            (("y", "B"), np.mean),
            [1, 3],
            [1, 1],
            [5.5, 7.5],
        ),
        (
            pd.NamedAgg(("y", "A"), "max"),
            pd.NamedAgg(("y", "B"), np.mean),
            pd.NamedAgg(("y", "A"), lambda x: 1),
            [1, 3],
            [5.5, 7.5],
            [1, 1],
        ),
    ],
)
def test_agg_relabel_multiindex_column(
    agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
):
    """Named aggregation addresses MultiIndex columns via full label tuples."""
    # GH 29422, add tests for multiindex column cases
    df = DataFrame(
        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    )
    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
    idx = Index(["a", "b"], name=("x", "group"))

    result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
    expected = DataFrame({"a_max": [1, 3]}, index=idx)
    tm.assert_frame_equal(result, expected)

    # np.mean-based specs currently warn that the groupby method is used instead
    msg = "is currently using SeriesGroupBy.mean"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(("x", "group")).agg(
            col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
        )
    expected = DataFrame(
        {"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
    )
    tm.assert_frame_equal(result, expected)
def test_agg_relabel_multiindex_raises_not_exist():
    """A non-existent MultiIndex column label in a named agg raises KeyError."""
    # GH 29422, add test for raises scenario when aggregate column does not exist
    df = DataFrame(
        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    )
    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])

    with pytest.raises(KeyError, match="do not exist"):
        df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
def test_agg_relabel_multiindex_duplicates():
    # GH29422, add test for raises scenario when getting duplicates
    # GH28426, after this change, duplicates should also work if the relabelling is
    # different
    frame = DataFrame(
        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    )
    frame.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])

    relabelled = frame.groupby(("x", "group")).agg(
        a=(("y", "A"), "min"), b=(("y", "A"), "min")
    )
    expected = DataFrame(
        {"a": [0, 2], "b": [0, 2]},
        index=Index(["a", "b"], name=("x", "group")),
    )
    tm.assert_frame_equal(relabelled, expected)
@pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
def test_groupby_aggregate_empty_key(kwargs):
    """A column mapped to an empty func list is simply dropped from the result."""
    # GH: 32580
    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
    result = df.groupby("a").agg(kwargs)
    expected = DataFrame(
        [1, 4],
        index=Index([1, 2], dtype="int64", name="a"),
        columns=MultiIndex.from_tuples([["c", "min"]]),
    )
    tm.assert_frame_equal(result, expected)
def test_groupby_aggregate_empty_key_empty_return():
    # GH: 32580 Check if everything works, when return is empty
    # agg with only empty func lists yields an empty frame with the
    # column names preserved in an empty MultiIndex
    frame = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
    empty_agg = frame.groupby("a").agg({"b": []})
    expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))
    tm.assert_frame_equal(empty_agg, expected)
def test_groupby_aggregate_empty_with_multiindex_frame():
    # GH 39178: named agg on an empty frame grouped by several keys
    # produces an empty MultiIndex, not an error
    empty = DataFrame(columns=["a", "b", "c"])
    aggregated = empty.groupby(["a", "b"], group_keys=False).agg(d=("c", list))
    expected = DataFrame(
        columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])
    )
    tm.assert_frame_equal(aggregated, expected)
def test_grouby_agg_loses_results_with_as_index_false_relabel():
    # GH 32240: When the aggregate function relabels column names and
    # as_index=False is specified, the results are dropped.
    frame = DataFrame(
        {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
    )

    relabelled = frame.groupby("key", as_index=False).agg(
        min_val=pd.NamedAgg(column="val", aggfunc="min")
    )
    expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
    tm.assert_frame_equal(relabelled, expected)
def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():
    # GH 32240: When the aggregate function relabels column names and
    # as_index=False is specified, the results are dropped. Check if
    # multiindex is returned in the right order
    frame = DataFrame(
        {
            "key": ["x", "y", "x", "y", "x", "x"],
            "key1": ["a", "b", "c", "b", "a", "c"],
            "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
        }
    )

    relabelled = frame.groupby(["key", "key1"], as_index=False).agg(
        min_val=pd.NamedAgg(column="val", aggfunc="min")
    )
    expected = DataFrame(
        {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
    )
    tm.assert_frame_equal(relabelled, expected)
@pytest.mark.parametrize(
    "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
)
def test_multiindex_custom_func(func):
    """A UDF aggregation preserves MultiIndex columns (names included)."""
    # GH 31777
    data = [[1, 4, 2], [5, 7, 1]]
    df = DataFrame(
        data,
        columns=MultiIndex.from_arrays(
            [[1, 1, 2], [3, 4, 3]], names=["Sisko", "Janeway"]
        ),
    )
    result = df.groupby(np.array([0, 1])).agg(func)
    # singleton groups: the "mean" is just the original values as floats
    expected_dict = {
        (1, 3): {0: 1.0, 1: 5.0},
        (1, 4): {0: 4.0, 1: 7.0},
        (2, 3): {0: 2.0, 1: 1.0},
    }
    expected = DataFrame(expected_dict, index=np.array([0, 1]), columns=df.columns)
    tm.assert_frame_equal(result, expected)
def myfunc(s):
    """Named (non-lambda) aggregator used by test_lambda_named_agg: 0.90th percentile of *s*."""
    q = 0.90
    return np.percentile(s, q=q)
@pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
def test_lambda_named_agg(func):
    """Lambdas and named functions behave the same inside a named aggregation."""
    # see gh-28467
    animals = DataFrame(
        {
            "kind": ["cat", "dog", "cat", "dog"],
            "height": [9.1, 6.0, 9.5, 34.0],
            "weight": [7.9, 7.5, 9.9, 198.0],
        }
    )

    result = animals.groupby("kind").agg(
        mean_height=("height", "mean"), perc90=("height", func)
    )
    expected = DataFrame(
        [[9.3, 9.1036], [20.0, 6.252]],
        columns=["mean_height", "perc90"],
        index=Index(["cat", "dog"], name="kind"),
    )

    tm.assert_frame_equal(result, expected)
def test_aggregate_mixed_types():
    # GH 16916: agg on a frame whose grouping column mixes strings and ints;
    # groups are sorted with the int key first
    frame = DataFrame(
        data=np.zeros((3, 3), dtype=int), columns=list("XYZ"), index=list("abc")
    )
    frame["grouping"] = ["group 1", "group 1", 2]

    listified = frame.groupby("grouping").aggregate(lambda col: col.tolist())

    expected = DataFrame(
        [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]],
        index=Index([2, "group 1"], dtype="object", name="grouping"),
        columns=Index(["X", "Y", "Z"], dtype="object"),
    )
    tm.assert_frame_equal(listified, expected)
@pytest.mark.xfail(reason="Not implemented;see GH 31256")
def test_aggregate_udf_na_extension_type():
    """A UDF returning pd.NA should round-trip back to Int64 dtype (not yet supported)."""
    # https://github.com/pandas-dev/pandas/pull/31359
    # This is currently failing to cast back to Int64Dtype.
    # The presence of the NA causes two problems
    # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
    # 2. The presence of an NA forces object type, so the non-NA values is
    #    a Python int rather than a NumPy int64. Python ints aren't
    #    instances of numpy.int64.
    def aggfunc(x):
        if all(x > 2):
            return 1
        else:
            return pd.NA

    df = DataFrame({"A": pd.array([1, 2, 3])})
    result = df.groupby([1, 1, 2]).agg(aggfunc)
    expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
    tm.assert_frame_equal(result, expected)
class TestLambdaMangling:
    """Duplicate lambdas passed to ``agg`` get deduplicated names <lambda_N>."""

    def test_basic(self):
        frame = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
        result = frame.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})

        expected = DataFrame(
            {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
            index=Index([0, 1], name="A"),
        )
        tm.assert_frame_equal(result, expected)

    def test_mangle_series_groupby(self):
        grouped = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
        result = grouped.agg([lambda x: 0, lambda x: 1])
        expected = DataFrame(
            {"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}, index=np.array([0, 1])
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
    def test_with_kwargs(self):
        f1 = lambda x, y, b=1: x.sum() + y + b
        f2 = lambda x, y, b=2: x.sum() + y * b

        result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
        expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
        tm.assert_frame_equal(result, expected)

        result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
        expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
        tm.assert_frame_equal(result, expected)

    def test_agg_with_one_lambda(self):
        # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
        frame = DataFrame(
            {
                "kind": ["cat", "dog", "cat", "dog"],
                "height": [9.1, 6.0, 9.5, 34.0],
                "weight": [7.9, 7.5, 9.9, 198.0],
            }
        )

        out_cols = ["height_sqr_min", "height_max", "weight_max"]
        expected = DataFrame(
            {
                "height_sqr_min": [82.81, 36.00],
                "height_max": [9.5, 34.0],
                "weight_max": [9.9, 198.0],
            },
            index=Index(["cat", "dog"], name="kind"),
            columns=out_cols,
        )

        # spelled with pd.NamedAgg
        result1 = frame.groupby(by="kind").agg(
            height_sqr_min=pd.NamedAgg(
                column="height", aggfunc=lambda x: np.min(x**2)
            ),
            height_max=pd.NamedAgg(column="height", aggfunc="max"),
            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
        )
        tm.assert_frame_equal(result1, expected)

        # spelled with bare (column, aggfunc) tuples
        result2 = frame.groupby(by="kind").agg(
            height_sqr_min=("height", lambda x: np.min(x**2)),
            height_max=("height", "max"),
            weight_max=("weight", "max"),
        )
        tm.assert_frame_equal(result2, expected)

    def test_agg_multiple_lambda(self):
        # GH25719, test for DataFrameGroupby.agg with multiple lambdas
        # with mixed aggfunc
        frame = DataFrame(
            {
                "kind": ["cat", "dog", "cat", "dog"],
                "height": [9.1, 6.0, 9.5, 34.0],
                "weight": [7.9, 7.5, 9.9, 198.0],
            }
        )
        out_cols = [
            "height_sqr_min",
            "height_max",
            "weight_max",
            "height_max_2",
            "weight_min",
        ]
        expected = DataFrame(
            {
                "height_sqr_min": [82.81, 36.00],
                "height_max": [9.5, 34.0],
                "weight_max": [9.9, 198.0],
                "height_max_2": [9.5, 34.0],
                "weight_min": [7.9, 7.5],
            },
            index=Index(["cat", "dog"], name="kind"),
            columns=out_cols,
        )

        # spelled with bare (column, aggfunc) tuples
        result1 = frame.groupby(by="kind").agg(
            height_sqr_min=("height", lambda x: np.min(x**2)),
            height_max=("height", "max"),
            weight_max=("weight", "max"),
            height_max_2=("height", lambda x: np.max(x)),
            weight_min=("weight", lambda x: np.min(x)),
        )
        tm.assert_frame_equal(result1, expected)

        # spelled with pd.NamedAgg
        result2 = frame.groupby(by="kind").agg(
            height_sqr_min=pd.NamedAgg(
                column="height", aggfunc=lambda x: np.min(x**2)
            ),
            height_max=pd.NamedAgg(column="height", aggfunc="max"),
            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
            height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
            weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
        )
        tm.assert_frame_equal(result2, expected)
|
| 1257 |
+
|
| 1258 |
+
|
| 1259 |
+
def test_groupby_get_by_index():
    # GH 33439: Series.get with a label taken from the group's own index
    frame = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
    result = frame.groupby("A").agg({"B": lambda grp: grp.get(grp.index[-1])})
    expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
    tm.assert_frame_equal(result, expected)
|
| 1265 |
+
|
| 1266 |
+
|
| 1267 |
+
@pytest.mark.parametrize(
    "grp_col_dict, exp_data",
    [
        ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
        ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
        ({"nr": "min"}, {"nr": [1, 5]}),
    ],
)
def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
    # test single aggregations on ordered categorical cols GH27800

    # build the input frame
    input_df = DataFrame(
        {
            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
            "cat_ord": list("aabbccdd"),
            "cat": list("aaaabbbb"),
        }
    ).astype({"cat": "category", "cat_ord": "category"})
    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()

    result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)

    # build the expected frame
    cat_index = pd.CategoricalIndex(
        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
    )
    expected_df = DataFrame(data=exp_data, index=cat_index)

    if "cat_ord" in expected_df:
        # ordered categorical columns should be preserved
        expected_df["cat_ord"] = expected_df["cat_ord"].astype(
            input_df["cat_ord"].dtype
        )

    tm.assert_frame_equal(result_df, expected_df)
|
| 1304 |
+
|
| 1305 |
+
|
| 1306 |
+
@pytest.mark.parametrize(
    "grp_col_dict, exp_data",
    [
        ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
        ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
        ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
    ],
)
def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
    # test combined aggregations on ordered categorical cols GH27800

    # build the input frame
    input_df = DataFrame(
        {
            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
            "cat_ord": list("aabbccdd"),
            "cat": list("aaaabbbb"),
        }
    ).astype({"cat": "category", "cat_ord": "category"})
    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()

    result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)

    # build the expected frame
    cat_index = pd.CategoricalIndex(
        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
    )

    # flatten grp_col_dict into (column, aggfunc) pairs for the expected
    # MultiIndex columns
    pairs = []
    for col, funcs in grp_col_dict.items():
        if isinstance(funcs, list):
            pairs.extend((col, func) for func in funcs)
        else:
            pairs.append((col, funcs))
    multi_index = MultiIndex.from_tuples(pairs)

    expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
    for col in expected_df.columns:
        if isinstance(col, tuple) and "cat_ord" in col:
            # ordered categorical should be preserved
            expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)

    tm.assert_frame_equal(result_df, expected_df)
|
| 1352 |
+
|
| 1353 |
+
|
| 1354 |
+
def test_nonagg_agg():
    # GH 35490 - Single/Multiple agg of non-agg function give same results
    # TODO: agg should raise for functions that don't aggregate
    frame = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
    grouped = frame.groupby("a")

    result = grouped.agg(["cumsum"])
    result.columns = result.columns.droplevel(-1)
    expected = grouped.agg("cumsum")

    tm.assert_frame_equal(result, expected)
|
| 1365 |
+
|
| 1366 |
+
|
| 1367 |
+
def test_aggregate_datetime_objects():
    # https://github.com/pandas-dev/pandas/issues/36003
    # ensure we don't raise an error but keep object dtype for out-of-bounds
    # datetimes
    frame = DataFrame(
        {
            "A": ["X", "Y"],
            "B": [
                datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
                datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
            ],
        }
    )
    result = frame.groupby("A").B.max()
    expected = frame.set_index("A")["B"]
    tm.assert_series_equal(result, expected)
|
| 1383 |
+
|
| 1384 |
+
|
| 1385 |
+
def test_groupby_index_object_dtype():
    # GH 40014
    frame = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]})
    frame.index = frame.index.astype("O")
    res = frame.groupby(["c0", "c1"]).p.agg(lambda x: all(x > 0))
    # Check that providing a user-defined function in agg()
    # produces the correct index shape when using an object-typed index.
    expected_index = MultiIndex.from_tuples(
        [("x", "x"), ("x", "y")], names=("c0", "c1")
    )
    expected = Series([False, True], index=expected_index, name="p")
    tm.assert_series_equal(res, expected)
|
| 1398 |
+
|
| 1399 |
+
|
| 1400 |
+
def test_timeseries_groupby_agg():
    # GH#43290

    def func(ser):
        # returns None (rather than NaN) for an all-NA group
        if ser.isna().all():
            return None
        return np.sum(ser)

    frame = DataFrame([1.0], index=[pd.Timestamp("2018-01-16 00:00:00+00:00")])
    res = frame.groupby(lambda x: 1).agg(func)

    expected = DataFrame([[1.0]], index=[1])
    tm.assert_frame_equal(res, expected)
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
def test_groupby_agg_precision(any_real_numeric_dtype):
|
| 1416 |
+
if any_real_numeric_dtype in tm.ALL_INT_NUMPY_DTYPES:
|
| 1417 |
+
max_value = np.iinfo(any_real_numeric_dtype).max
|
| 1418 |
+
if any_real_numeric_dtype in tm.FLOAT_NUMPY_DTYPES:
|
| 1419 |
+
max_value = np.finfo(any_real_numeric_dtype).max
|
| 1420 |
+
if any_real_numeric_dtype in tm.FLOAT_EA_DTYPES:
|
| 1421 |
+
max_value = np.finfo(any_real_numeric_dtype.lower()).max
|
| 1422 |
+
if any_real_numeric_dtype in tm.ALL_INT_EA_DTYPES:
|
| 1423 |
+
max_value = np.iinfo(any_real_numeric_dtype.lower()).max
|
| 1424 |
+
|
| 1425 |
+
df = DataFrame(
|
| 1426 |
+
{
|
| 1427 |
+
"key1": ["a"],
|
| 1428 |
+
"key2": ["b"],
|
| 1429 |
+
"key3": pd.array([max_value], dtype=any_real_numeric_dtype),
|
| 1430 |
+
}
|
| 1431 |
+
)
|
| 1432 |
+
arrays = [["a"], ["b"]]
|
| 1433 |
+
index = MultiIndex.from_arrays(arrays, names=("key1", "key2"))
|
| 1434 |
+
|
| 1435 |
+
expected = DataFrame(
|
| 1436 |
+
{"key3": pd.array([max_value], dtype=any_real_numeric_dtype)}, index=index
|
| 1437 |
+
)
|
| 1438 |
+
result = df.groupby(["key1", "key2"]).agg(lambda x: x)
|
| 1439 |
+
tm.assert_frame_equal(result, expected)
|
| 1440 |
+
|
| 1441 |
+
|
| 1442 |
+
def test_groupby_aggregate_directory(reduction_func):
|
| 1443 |
+
# GH#32793
|
| 1444 |
+
if reduction_func in ["corrwith", "nth"]:
|
| 1445 |
+
return None
|
| 1446 |
+
|
| 1447 |
+
obj = DataFrame([[0, 1], [0, np.nan]])
|
| 1448 |
+
|
| 1449 |
+
result_reduced_series = obj.groupby(0).agg(reduction_func)
|
| 1450 |
+
result_reduced_frame = obj.groupby(0).agg({1: reduction_func})
|
| 1451 |
+
|
| 1452 |
+
if reduction_func in ["size", "ngroup"]:
|
| 1453 |
+
# names are different: None / 1
|
| 1454 |
+
tm.assert_series_equal(
|
| 1455 |
+
result_reduced_series, result_reduced_frame[1], check_names=False
|
| 1456 |
+
)
|
| 1457 |
+
else:
|
| 1458 |
+
tm.assert_frame_equal(result_reduced_series, result_reduced_frame)
|
| 1459 |
+
tm.assert_series_equal(
|
| 1460 |
+
result_reduced_series.dtypes, result_reduced_frame.dtypes
|
| 1461 |
+
)
|
| 1462 |
+
|
| 1463 |
+
|
| 1464 |
+
def test_group_mean_timedelta_nat():
    # GH43132: NaT entries are skipped when averaging timedeltas
    data = Series(["1 day", "3 days", "NaT"], dtype="timedelta64[ns]")
    expected = Series(["2 days"], dtype="timedelta64[ns]", index=np.array([0]))

    result = data.groupby([0, 0, 0]).mean()

    tm.assert_series_equal(result, expected)
|
| 1472 |
+
|
| 1473 |
+
|
| 1474 |
+
@pytest.mark.parametrize(
    "input_data, expected_output",
    [
        ( # no timezone
            ["2021-01-01T00:00", "NaT", "2021-01-01T02:00"],
            ["2021-01-01T01:00"],
        ),
        ( # timezone
            ["2021-01-01T00:00-0100", "NaT", "2021-01-01T02:00-0100"],
            ["2021-01-01T01:00-0100"],
        ),
    ],
)
def test_group_mean_datetime64_nat(input_data, expected_output):
    # GH43132: NaT entries are skipped when averaging datetimes
    data = to_datetime(Series(input_data))
    expected = to_datetime(Series(expected_output, index=np.array([0])))

    result = data.groupby([0, 0, 0]).mean()
    tm.assert_series_equal(result, expected)
|
| 1494 |
+
|
| 1495 |
+
|
| 1496 |
+
@pytest.mark.parametrize(
    "func, output", [("mean", [8 + 18j, 10 + 22j]), ("sum", [40 + 90j, 50 + 110j])]
)
def test_groupby_complex(func, output):
    # GH#43701: mean/sum support complex values
    data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
    result = data.groupby(data.index % 2).agg(func)
    expected = Series(output)
    tm.assert_series_equal(result, expected)
|
| 1505 |
+
|
| 1506 |
+
|
| 1507 |
+
@pytest.mark.parametrize("func", ["min", "max", "var"])
def test_groupby_complex_raises(func):
    # GH#43701: ordering-based reductions are undefined for complex values
    data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
    msg = "No matching signature found"
    with pytest.raises(TypeError, match=msg):
        data.groupby(data.index % 2).agg(func)
|
| 1514 |
+
|
| 1515 |
+
|
| 1516 |
+
@pytest.mark.parametrize(
    "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}]
)
def test_multi_axis_1_raises(func):
    # GH#46995: multi-func agg is not implemented for axis=1 groupby
    frame = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]})
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = frame.groupby("a", axis=1)
    with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"):
        gb.agg(func)
|
| 1527 |
+
|
| 1528 |
+
|
| 1529 |
+
@pytest.mark.parametrize(
    "test, constant",
    [
        ([[20, "A"], [20, "B"], [10, "C"]], {0: [10, 20], 1: ["C", ["A", "B"]]}),
        ([[20, "A"], [20, "B"], [30, "C"]], {0: [20, 30], 1: [["A", "B"], "C"]}),
        ([["a", 1], ["a", 1], ["b", 2], ["b", 3]], {0: ["a", "b"], 1: [1, [2, 3]]}),
        pytest.param(
            [["a", 1], ["a", 2], ["b", 3], ["b", 3]],
            {0: ["a", "b"], 1: [[1, 2], 3]},
            marks=pytest.mark.xfail,
        ),
    ],
)
def test_agg_of_mode_list(test, constant):
    # GH#25581
    # Mode usually only returns 1 value, but can return a list in the case of a tie.
    result = DataFrame(test).groupby(0).agg(Series.mode)

    expected = DataFrame(constant).set_index(0)

    tm.assert_frame_equal(result, expected)
|
| 1552 |
+
|
| 1553 |
+
|
| 1554 |
+
def test_dataframe_groupy_agg_list_like_func_with_args():
    # GH#50624: positional args / kwargs are forwarded to each func in the list,
    # so a kwarg unknown to any one of them must raise
    frame = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
    gb = frame.groupby("y")

    def foo1(x, a=1, c=0):
        return x.sum() + a + c

    def foo2(x, b=2, c=0):
        return x.sum() + b + c

    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
    with pytest.raises(TypeError, match=msg):
        gb.agg([foo1, foo2], 3, b=3, c=4)

    result = gb.agg([foo1, foo2], 3, c=4)
    expected = DataFrame(
        [[8, 8], [9, 9], [10, 10]],
        index=Index(["a", "b", "c"], name="y"),
        columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
    )
    tm.assert_frame_equal(result, expected)
|
| 1576 |
+
|
| 1577 |
+
|
| 1578 |
+
def test_series_groupy_agg_list_like_func_with_args():
    # GH#50624: same forwarding behavior for SeriesGroupBy
    ser = Series([1, 2, 3])
    sgb = ser.groupby(ser)

    def foo1(x, a=1, c=0):
        return x.sum() + a + c

    def foo2(x, b=2, c=0):
        return x.sum() + b + c

    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
    with pytest.raises(TypeError, match=msg):
        sgb.agg([foo1, foo2], 3, b=3, c=4)

    result = sgb.agg([foo1, foo2], 3, c=4)
    expected = DataFrame(
        [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]
    )
    tm.assert_frame_equal(result, expected)
|
| 1598 |
+
|
| 1599 |
+
|
| 1600 |
+
def test_agg_groupings_selection():
|
| 1601 |
+
# GH#51186 - a selected grouping should be in the output of agg
|
| 1602 |
+
df = DataFrame({"a": [1, 1, 2], "b": [3, 3, 4], "c": [5, 6, 7]})
|
| 1603 |
+
gb = df.groupby(["a", "b"])
|
| 1604 |
+
selected_gb = gb[["b", "c"]]
|
| 1605 |
+
result = selected_gb.agg(lambda x: x.sum())
|
| 1606 |
+
index = MultiIndex(
|
| 1607 |
+
levels=[[1, 2], [3, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
|
| 1608 |
+
)
|
| 1609 |
+
expected = DataFrame({"b": [6, 4], "c": [11, 7]}, index=index)
|
| 1610 |
+
tm.assert_frame_equal(result, expected)
|
| 1611 |
+
|
| 1612 |
+
|
| 1613 |
+
def test_agg_multiple_with_as_index_false_subset_to_a_single_column():
    # GH#50724
    frame = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
    result = frame.groupby("a", as_index=False)["b"].agg(["sum", "mean"])
    expected = DataFrame({"a": [1, 2], "sum": [7, 5], "mean": [3.5, 5.0]})
    tm.assert_frame_equal(result, expected)
|
| 1620 |
+
|
| 1621 |
+
|
| 1622 |
+
def test_agg_with_as_index_false_with_list():
|
| 1623 |
+
# GH#52849
|
| 1624 |
+
df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
|
| 1625 |
+
gb = df.groupby(by=["a1", "a2"], as_index=False)
|
| 1626 |
+
result = gb.agg(["sum"])
|
| 1627 |
+
|
| 1628 |
+
expected = DataFrame(
|
| 1629 |
+
data=[[0, 2, 4], [0, 3, 5], [1, 3, 6]],
|
| 1630 |
+
columns=MultiIndex.from_tuples([("a1", ""), ("a2", ""), ("b", "sum")]),
|
| 1631 |
+
)
|
| 1632 |
+
tm.assert_frame_equal(result, expected)
|
| 1633 |
+
|
| 1634 |
+
|
| 1635 |
+
def test_groupby_agg_extension_timedelta_cumsum_with_named_aggregation():
    # GH#41720: named aggregation accepts the "cumsum" transform on timedeltas
    expected = DataFrame(
        {
            "td": {
                0: pd.Timedelta("0 days 01:00:00"),
                1: pd.Timedelta("0 days 01:15:00"),
                2: pd.Timedelta("0 days 01:15:00"),
            }
        }
    )
    frame = DataFrame(
        {
            "td": Series(
                ["0 days 01:00:00", "0 days 00:15:00", "0 days 01:15:00"],
                dtype="timedelta64[ns]",
            ),
            "grps": ["a", "a", "b"],
        }
    )
    result = frame.groupby("grps").agg(td=("td", "cumsum"))
    tm.assert_frame_equal(result, expected)
|
| 1658 |
+
|
| 1659 |
+
|
| 1660 |
+
def test_groupby_aggregation_empty_group():
    # https://github.com/pandas-dev/pandas/issues/18869
    # a UDF raising on empty groups (unobserved categories) must propagate
    def func(x):
        if len(x) == 0:
            raise ValueError("length must not be 0")
        return len(x)

    frame = DataFrame(
        {"A": pd.Categorical(["a", "a"], categories=["a", "b", "c"]), "B": [1, 1]}
    )
    with pytest.raises(ValueError, match="length must not be 0"):
        frame.groupby("A", observed=False).agg(func)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
test cython .agg behavior
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from pandas.core.dtypes.common import (
|
| 9 |
+
is_float_dtype,
|
| 10 |
+
is_integer_dtype,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
import pandas as pd
|
| 14 |
+
from pandas import (
|
| 15 |
+
DataFrame,
|
| 16 |
+
Index,
|
| 17 |
+
NaT,
|
| 18 |
+
Series,
|
| 19 |
+
Timedelta,
|
| 20 |
+
Timestamp,
|
| 21 |
+
bdate_range,
|
| 22 |
+
)
|
| 23 |
+
import pandas._testing as tm
|
| 24 |
+
import pandas.core.common as com
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@pytest.mark.parametrize(
    "op_name",
    [
        "count",
        "sum",
        "std",
        "var",
        "sem",
        "mean",
        pytest.param(
            "median",
            # ignore mean of empty slice
            # and all-NaN
            marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
        ),
        "prod",
        "min",
        "max",
    ],
)
def test_cythonized_aggers(op_name):
    """Cython reductions agree with a per-group Python computation."""
    df = DataFrame(
        {
            "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
            "B": ["A", "B"] * 6,
            "C": np.random.default_rng(2).standard_normal(12),
        }
    )
    df.loc[2:10:2, "C"] = np.nan

    op = lambda x: getattr(x, op_name)()

    # single grouping column
    grouped = df.drop(["B"], axis=1).groupby("A")
    exp = DataFrame({"C": {key: op(grp["C"]) for key, grp in grouped}})
    exp.index.name = "A"
    result = op(grouped)
    tm.assert_frame_equal(result, exp)

    # two grouping columns
    grouped = df.groupby(["A", "B"])
    expd = {}
    for (k1, k2), grp in grouped:
        expd.setdefault(k1, {})[k2] = op(grp["C"])
    exp = DataFrame(expd).T.stack(future_stack=True)
    exp.index.names = ["A", "B"]
    exp.name = "C"

    result = op(grouped)["C"]
    if op_name in ["sum", "prod"]:
        tm.assert_series_equal(result, exp)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def test_cython_agg_boolean():
    # boolean columns go through the cython mean path
    frame = DataFrame(
        {
            "a": np.random.default_rng(2).integers(0, 5, 50),
            "b": np.random.default_rng(2).integers(0, 2, 50).astype("bool"),
        }
    )
    result = frame.groupby("a")["b"].mean()
    with tm.assert_produces_warning(FutureWarning, match="using SeriesGroupBy.mean"):
        # GH#53425
        expected = frame.groupby("a")["b"].agg(np.mean)

    tm.assert_series_equal(result, expected)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def test_cython_agg_nothing_to_agg():
    # a frame with no numeric columns under numeric_only=True
    msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"

    frame = DataFrame(
        {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}
    )
    with pytest.raises(TypeError, match=msg):
        frame.groupby("a")["b"].mean(numeric_only=True)

    frame = DataFrame(
        {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}
    )

    result = frame[["b"]].groupby(frame["a"]).mean(numeric_only=True)
    expected = DataFrame(
        [], index=frame["a"].sort_values().drop_duplicates(), columns=[]
    )
    tm.assert_frame_equal(result, expected)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def test_cython_agg_nothing_to_agg_with_dates():
    # datetime column is non-numeric for numeric_only purposes
    frame = DataFrame(
        {
            "a": np.random.default_rng(2).integers(0, 5, 50),
            "b": ["foo", "bar"] * 25,
            "dates": pd.date_range("now", periods=50, freq="min"),
        }
    )
    msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
    with pytest.raises(TypeError, match=msg):
        frame.groupby("b").dates.mean(numeric_only=True)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_cython_agg_frame_columns():
    # #2113: repeated axis="columns" groupby calls must keep working
    frame = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})

    msg = "DataFrame.groupby with axis=1 is deprecated"
    # call four times on purpose to exercise any caching between calls
    for _ in range(4):
        with tm.assert_produces_warning(FutureWarning, match=msg):
            frame.groupby(level=0, axis="columns").mean()
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def test_cython_agg_return_dict():
    # GH 16741: a UDF may return a dict per group
    frame = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
            "C": np.random.default_rng(2).standard_normal(8),
            "D": np.random.default_rng(2).standard_normal(8),
        }
    )

    result = frame.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())
    expected = Series(
        [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
        index=Index(["bar", "foo"], name="A"),
        name="B",
    )
    tm.assert_series_equal(result, expected)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def test_cython_fail_agg():
    # string sum falls back from the cython path; both spellings must agree
    dates = bdate_range("1/1/2000", periods=50)
    ser = Series(["A", "B", "C", "D", "E"] * 10, index=dates)

    grouped = ser.groupby(lambda x: x.month)
    summed = grouped.sum()
    with tm.assert_produces_warning(FutureWarning, match="using SeriesGroupBy.sum"):
        # GH#53425
        expected = grouped.agg(np.sum)
    tm.assert_series_equal(summed, expected)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@pytest.mark.parametrize(
    "op, targop",
    [
        ("mean", np.mean),
        ("median", np.median),
        ("var", np.var),
        ("sum", np.sum),
        ("prod", np.prod),
        ("min", np.min),
        ("max", np.max),
        ("first", lambda x: x.iloc[0]),
        ("last", lambda x: x.iloc[-1]),
    ],
)
def test__cython_agg_general(op, targop):
    # private _cython_agg_general matches agg with the numpy equivalent
    frame = DataFrame(np.random.default_rng(2).standard_normal(1000))
    labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)

    result = frame.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True)
    # numpy reducers in the cython table trigger the GH#53425 deprecation
    warn = FutureWarning if targop in com._cython_table else None
    with tm.assert_produces_warning(warn, match=f"using DataFrameGroupBy.{op}"):
        # GH#53425
        expected = frame.groupby(labels).agg(targop)
    tm.assert_frame_equal(result, expected)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
@pytest.mark.parametrize(
    "op, targop",
    [
        ("mean", np.mean),
        ("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
        ("var", lambda x: np.var(x, ddof=1)),
        ("min", np.min),
        ("max", np.max),
    ],
)
def test_cython_agg_empty_buckets(op, targop, observed):
    frame = DataFrame([11, 12, 13])
    grps = range(0, 55, 5)

    # calling _cython_agg_general directly, instead of via the user API
    # which sets different values for min_count, so do that here.
    gb = frame.groupby(pd.cut(frame[0], grps), observed=observed)
    result = gb._cython_agg_general(op, alt=None, numeric_only=True)

    gb = frame.groupby(pd.cut(frame[0], grps), observed=observed)
    expected = gb.agg(lambda x: targop(x))
    tm.assert_frame_equal(result, expected)
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def test_cython_agg_empty_buckets_nanops(observed):
|
| 229 |
+
# GH-18869 can't call nanops on empty groups, so hardcode expected
|
| 230 |
+
# for these
|
| 231 |
+
df = DataFrame([11, 12, 13], columns=["a"])
|
| 232 |
+
grps = np.arange(0, 25, 5, dtype=int)
|
| 233 |
+
# add / sum
|
| 234 |
+
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
|
| 235 |
+
"sum", alt=None, numeric_only=True
|
| 236 |
+
)
|
| 237 |
+
intervals = pd.interval_range(0, 20, freq=5)
|
| 238 |
+
expected = DataFrame(
|
| 239 |
+
{"a": [0, 0, 36, 0]},
|
| 240 |
+
index=pd.CategoricalIndex(intervals, name="a", ordered=True),
|
| 241 |
+
)
|
| 242 |
+
if observed:
|
| 243 |
+
expected = expected[expected.a != 0]
|
| 244 |
+
|
| 245 |
+
tm.assert_frame_equal(result, expected)
|
| 246 |
+
|
| 247 |
+
# prod
|
| 248 |
+
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
|
| 249 |
+
"prod", alt=None, numeric_only=True
|
| 250 |
+
)
|
| 251 |
+
expected = DataFrame(
|
| 252 |
+
{"a": [1, 1, 1716, 1]},
|
| 253 |
+
index=pd.CategoricalIndex(intervals, name="a", ordered=True),
|
| 254 |
+
)
|
| 255 |
+
if observed:
|
| 256 |
+
expected = expected[expected.a != 1]
|
| 257 |
+
|
| 258 |
+
tm.assert_frame_equal(result, expected)
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
@pytest.mark.parametrize("op", ["first", "last", "max", "min"])
|
| 262 |
+
@pytest.mark.parametrize(
|
| 263 |
+
"data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
|
| 264 |
+
)
|
| 265 |
+
def test_cython_with_timestamp_and_nat(op, data):
|
| 266 |
+
# https://github.com/pandas-dev/pandas/issues/19526
|
| 267 |
+
df = DataFrame({"a": [0, 1], "b": [data, NaT]})
|
| 268 |
+
index = Index([0, 1], name="a")
|
| 269 |
+
|
| 270 |
+
# We will group by a and test the cython aggregations
|
| 271 |
+
expected = DataFrame({"b": [data, NaT]}, index=index)
|
| 272 |
+
|
| 273 |
+
result = df.groupby("a").aggregate(op)
|
| 274 |
+
tm.assert_frame_equal(expected, result)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
@pytest.mark.parametrize(
|
| 278 |
+
"agg",
|
| 279 |
+
[
|
| 280 |
+
"min",
|
| 281 |
+
"max",
|
| 282 |
+
"count",
|
| 283 |
+
"sum",
|
| 284 |
+
"prod",
|
| 285 |
+
"var",
|
| 286 |
+
"mean",
|
| 287 |
+
"median",
|
| 288 |
+
"ohlc",
|
| 289 |
+
"cumprod",
|
| 290 |
+
"cumsum",
|
| 291 |
+
"shift",
|
| 292 |
+
"any",
|
| 293 |
+
"all",
|
| 294 |
+
"quantile",
|
| 295 |
+
"first",
|
| 296 |
+
"last",
|
| 297 |
+
"rank",
|
| 298 |
+
"cummin",
|
| 299 |
+
"cummax",
|
| 300 |
+
],
|
| 301 |
+
)
|
| 302 |
+
def test_read_only_buffer_source_agg(agg):
|
| 303 |
+
# https://github.com/pandas-dev/pandas/issues/36014
|
| 304 |
+
df = DataFrame(
|
| 305 |
+
{
|
| 306 |
+
"sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0],
|
| 307 |
+
"species": ["setosa", "setosa", "setosa", "setosa", "setosa"],
|
| 308 |
+
}
|
| 309 |
+
)
|
| 310 |
+
df._mgr.arrays[0].flags.writeable = False
|
| 311 |
+
|
| 312 |
+
result = df.groupby(["species"]).agg({"sepal_length": agg})
|
| 313 |
+
expected = df.copy().groupby(["species"]).agg({"sepal_length": agg})
|
| 314 |
+
|
| 315 |
+
tm.assert_equal(result, expected)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
@pytest.mark.parametrize(
|
| 319 |
+
"op_name",
|
| 320 |
+
[
|
| 321 |
+
"count",
|
| 322 |
+
"sum",
|
| 323 |
+
"std",
|
| 324 |
+
"var",
|
| 325 |
+
"sem",
|
| 326 |
+
"mean",
|
| 327 |
+
"median",
|
| 328 |
+
"prod",
|
| 329 |
+
"min",
|
| 330 |
+
"max",
|
| 331 |
+
],
|
| 332 |
+
)
|
| 333 |
+
def test_cython_agg_nullable_int(op_name):
|
| 334 |
+
# ensure that the cython-based aggregations don't fail for nullable dtype
|
| 335 |
+
# (eg https://github.com/pandas-dev/pandas/issues/37415)
|
| 336 |
+
df = DataFrame(
|
| 337 |
+
{
|
| 338 |
+
"A": ["A", "B"] * 5,
|
| 339 |
+
"B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"),
|
| 340 |
+
}
|
| 341 |
+
)
|
| 342 |
+
result = getattr(df.groupby("A")["B"], op_name)()
|
| 343 |
+
df2 = df.assign(B=df["B"].astype("float64"))
|
| 344 |
+
expected = getattr(df2.groupby("A")["B"], op_name)()
|
| 345 |
+
if op_name in ("mean", "median"):
|
| 346 |
+
convert_integer = False
|
| 347 |
+
else:
|
| 348 |
+
convert_integer = True
|
| 349 |
+
expected = expected.convert_dtypes(convert_integer=convert_integer)
|
| 350 |
+
tm.assert_series_equal(result, expected)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
|
| 354 |
+
def test_count_masked_returns_masked_dtype(dtype):
|
| 355 |
+
df = DataFrame(
|
| 356 |
+
{
|
| 357 |
+
"A": [1, 1],
|
| 358 |
+
"B": pd.array([1, pd.NA], dtype=dtype),
|
| 359 |
+
"C": pd.array([1, 1], dtype=dtype),
|
| 360 |
+
}
|
| 361 |
+
)
|
| 362 |
+
result = df.groupby("A").count()
|
| 363 |
+
expected = DataFrame(
|
| 364 |
+
[[1, 2]], index=Index([1], name="A"), columns=["B", "C"], dtype="Int64"
|
| 365 |
+
)
|
| 366 |
+
tm.assert_frame_equal(result, expected)
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
@pytest.mark.parametrize("with_na", [True, False])
|
| 370 |
+
@pytest.mark.parametrize(
|
| 371 |
+
"op_name, action",
|
| 372 |
+
[
|
| 373 |
+
# ("count", "always_int"),
|
| 374 |
+
("sum", "large_int"),
|
| 375 |
+
# ("std", "always_float"),
|
| 376 |
+
("var", "always_float"),
|
| 377 |
+
# ("sem", "always_float"),
|
| 378 |
+
("mean", "always_float"),
|
| 379 |
+
("median", "always_float"),
|
| 380 |
+
("prod", "large_int"),
|
| 381 |
+
("min", "preserve"),
|
| 382 |
+
("max", "preserve"),
|
| 383 |
+
("first", "preserve"),
|
| 384 |
+
("last", "preserve"),
|
| 385 |
+
],
|
| 386 |
+
)
|
| 387 |
+
@pytest.mark.parametrize(
|
| 388 |
+
"data",
|
| 389 |
+
[
|
| 390 |
+
pd.array([1, 2, 3, 4], dtype="Int64"),
|
| 391 |
+
pd.array([1, 2, 3, 4], dtype="Int8"),
|
| 392 |
+
pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"),
|
| 393 |
+
pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"),
|
| 394 |
+
pd.array([True, True, False, False], dtype="boolean"),
|
| 395 |
+
],
|
| 396 |
+
)
|
| 397 |
+
def test_cython_agg_EA_known_dtypes(data, op_name, action, with_na):
|
| 398 |
+
if with_na:
|
| 399 |
+
data[3] = pd.NA
|
| 400 |
+
|
| 401 |
+
df = DataFrame({"key": ["a", "a", "b", "b"], "col": data})
|
| 402 |
+
grouped = df.groupby("key")
|
| 403 |
+
|
| 404 |
+
if action == "always_int":
|
| 405 |
+
# always Int64
|
| 406 |
+
expected_dtype = pd.Int64Dtype()
|
| 407 |
+
elif action == "large_int":
|
| 408 |
+
# for any int/bool use Int64, for float preserve dtype
|
| 409 |
+
if is_float_dtype(data.dtype):
|
| 410 |
+
expected_dtype = data.dtype
|
| 411 |
+
elif is_integer_dtype(data.dtype):
|
| 412 |
+
# match the numpy dtype we'd get with the non-nullable analogue
|
| 413 |
+
expected_dtype = data.dtype
|
| 414 |
+
else:
|
| 415 |
+
expected_dtype = pd.Int64Dtype()
|
| 416 |
+
elif action == "always_float":
|
| 417 |
+
# for any int/bool use Float64, for float preserve dtype
|
| 418 |
+
if is_float_dtype(data.dtype):
|
| 419 |
+
expected_dtype = data.dtype
|
| 420 |
+
else:
|
| 421 |
+
expected_dtype = pd.Float64Dtype()
|
| 422 |
+
elif action == "preserve":
|
| 423 |
+
expected_dtype = data.dtype
|
| 424 |
+
|
| 425 |
+
result = getattr(grouped, op_name)()
|
| 426 |
+
assert result["col"].dtype == expected_dtype
|
| 427 |
+
|
| 428 |
+
result = grouped.aggregate(op_name)
|
| 429 |
+
assert result["col"].dtype == expected_dtype
|
| 430 |
+
|
| 431 |
+
result = getattr(grouped["col"], op_name)()
|
| 432 |
+
assert result.dtype == expected_dtype
|
| 433 |
+
|
| 434 |
+
result = grouped["col"].aggregate(op_name)
|
| 435 |
+
assert result.dtype == expected_dtype
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas.errors import NumbaUtilError
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
Index,
|
| 9 |
+
NamedAgg,
|
| 10 |
+
Series,
|
| 11 |
+
option_context,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
pytestmark = pytest.mark.single_cpu
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def test_correct_function_signature():
|
| 19 |
+
pytest.importorskip("numba")
|
| 20 |
+
|
| 21 |
+
def incorrect_function(x):
|
| 22 |
+
return sum(x) * 2.7
|
| 23 |
+
|
| 24 |
+
data = DataFrame(
|
| 25 |
+
{"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
|
| 26 |
+
columns=["key", "data"],
|
| 27 |
+
)
|
| 28 |
+
with pytest.raises(NumbaUtilError, match="The first 2"):
|
| 29 |
+
data.groupby("key").agg(incorrect_function, engine="numba")
|
| 30 |
+
|
| 31 |
+
with pytest.raises(NumbaUtilError, match="The first 2"):
|
| 32 |
+
data.groupby("key")["data"].agg(incorrect_function, engine="numba")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def test_check_nopython_kwargs():
|
| 36 |
+
pytest.importorskip("numba")
|
| 37 |
+
|
| 38 |
+
def incorrect_function(values, index):
|
| 39 |
+
return sum(values) * 2.7
|
| 40 |
+
|
| 41 |
+
data = DataFrame(
|
| 42 |
+
{"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
|
| 43 |
+
columns=["key", "data"],
|
| 44 |
+
)
|
| 45 |
+
with pytest.raises(NumbaUtilError, match="numba does not support"):
|
| 46 |
+
data.groupby("key").agg(incorrect_function, engine="numba", a=1)
|
| 47 |
+
|
| 48 |
+
with pytest.raises(NumbaUtilError, match="numba does not support"):
|
| 49 |
+
data.groupby("key")["data"].agg(incorrect_function, engine="numba", a=1)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@pytest.mark.filterwarnings("ignore")
|
| 53 |
+
# Filter warnings when parallel=True and the function can't be parallelized by Numba
|
| 54 |
+
@pytest.mark.parametrize("jit", [True, False])
|
| 55 |
+
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
|
| 56 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 57 |
+
def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
|
| 58 |
+
pytest.importorskip("numba")
|
| 59 |
+
|
| 60 |
+
def func_numba(values, index):
|
| 61 |
+
return np.mean(values) * 2.7
|
| 62 |
+
|
| 63 |
+
if jit:
|
| 64 |
+
# Test accepted jitted functions
|
| 65 |
+
import numba
|
| 66 |
+
|
| 67 |
+
func_numba = numba.jit(func_numba)
|
| 68 |
+
|
| 69 |
+
data = DataFrame(
|
| 70 |
+
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
|
| 71 |
+
)
|
| 72 |
+
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
|
| 73 |
+
grouped = data.groupby(0, as_index=as_index)
|
| 74 |
+
if pandas_obj == "Series":
|
| 75 |
+
grouped = grouped[1]
|
| 76 |
+
|
| 77 |
+
result = grouped.agg(func_numba, engine="numba", engine_kwargs=engine_kwargs)
|
| 78 |
+
expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
|
| 79 |
+
|
| 80 |
+
tm.assert_equal(result, expected)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@pytest.mark.filterwarnings("ignore")
|
| 84 |
+
# Filter warnings when parallel=True and the function can't be parallelized by Numba
|
| 85 |
+
@pytest.mark.parametrize("jit", [True, False])
|
| 86 |
+
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
|
| 87 |
+
def test_cache(jit, pandas_obj, nogil, parallel, nopython):
|
| 88 |
+
# Test that the functions are cached correctly if we switch functions
|
| 89 |
+
pytest.importorskip("numba")
|
| 90 |
+
|
| 91 |
+
def func_1(values, index):
|
| 92 |
+
return np.mean(values) - 3.4
|
| 93 |
+
|
| 94 |
+
def func_2(values, index):
|
| 95 |
+
return np.mean(values) * 2.7
|
| 96 |
+
|
| 97 |
+
if jit:
|
| 98 |
+
import numba
|
| 99 |
+
|
| 100 |
+
func_1 = numba.jit(func_1)
|
| 101 |
+
func_2 = numba.jit(func_2)
|
| 102 |
+
|
| 103 |
+
data = DataFrame(
|
| 104 |
+
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
|
| 105 |
+
)
|
| 106 |
+
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
|
| 107 |
+
grouped = data.groupby(0)
|
| 108 |
+
if pandas_obj == "Series":
|
| 109 |
+
grouped = grouped[1]
|
| 110 |
+
|
| 111 |
+
result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
|
| 112 |
+
expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
|
| 113 |
+
tm.assert_equal(result, expected)
|
| 114 |
+
|
| 115 |
+
# Add func_2 to the cache
|
| 116 |
+
result = grouped.agg(func_2, engine="numba", engine_kwargs=engine_kwargs)
|
| 117 |
+
expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
|
| 118 |
+
tm.assert_equal(result, expected)
|
| 119 |
+
|
| 120 |
+
# Retest func_1 which should use the cache
|
| 121 |
+
result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
|
| 122 |
+
expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
|
| 123 |
+
tm.assert_equal(result, expected)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def test_use_global_config():
|
| 127 |
+
pytest.importorskip("numba")
|
| 128 |
+
|
| 129 |
+
def func_1(values, index):
|
| 130 |
+
return np.mean(values) - 3.4
|
| 131 |
+
|
| 132 |
+
data = DataFrame(
|
| 133 |
+
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
|
| 134 |
+
)
|
| 135 |
+
grouped = data.groupby(0)
|
| 136 |
+
expected = grouped.agg(func_1, engine="numba")
|
| 137 |
+
with option_context("compute.use_numba", True):
|
| 138 |
+
result = grouped.agg(func_1, engine=None)
|
| 139 |
+
tm.assert_frame_equal(expected, result)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@pytest.mark.parametrize(
|
| 143 |
+
"agg_kwargs",
|
| 144 |
+
[
|
| 145 |
+
{"func": ["min", "max"]},
|
| 146 |
+
{"func": "min"},
|
| 147 |
+
{"func": {1: ["min", "max"], 2: "sum"}},
|
| 148 |
+
{"bmin": NamedAgg(column=1, aggfunc="min")},
|
| 149 |
+
],
|
| 150 |
+
)
|
| 151 |
+
def test_multifunc_numba_vs_cython_frame(agg_kwargs):
|
| 152 |
+
pytest.importorskip("numba")
|
| 153 |
+
data = DataFrame(
|
| 154 |
+
{
|
| 155 |
+
0: ["a", "a", "b", "b", "a"],
|
| 156 |
+
1: [1.0, 2.0, 3.0, 4.0, 5.0],
|
| 157 |
+
2: [1, 2, 3, 4, 5],
|
| 158 |
+
},
|
| 159 |
+
columns=[0, 1, 2],
|
| 160 |
+
)
|
| 161 |
+
grouped = data.groupby(0)
|
| 162 |
+
result = grouped.agg(**agg_kwargs, engine="numba")
|
| 163 |
+
expected = grouped.agg(**agg_kwargs, engine="cython")
|
| 164 |
+
tm.assert_frame_equal(result, expected)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
@pytest.mark.parametrize(
|
| 168 |
+
"agg_kwargs,expected_func",
|
| 169 |
+
[
|
| 170 |
+
({"func": lambda values, index: values.sum()}, "sum"),
|
| 171 |
+
# FIXME
|
| 172 |
+
pytest.param(
|
| 173 |
+
{
|
| 174 |
+
"func": [
|
| 175 |
+
lambda values, index: values.sum(),
|
| 176 |
+
lambda values, index: values.min(),
|
| 177 |
+
]
|
| 178 |
+
},
|
| 179 |
+
["sum", "min"],
|
| 180 |
+
marks=pytest.mark.xfail(
|
| 181 |
+
reason="This doesn't work yet! Fails in nopython pipeline!"
|
| 182 |
+
),
|
| 183 |
+
),
|
| 184 |
+
],
|
| 185 |
+
)
|
| 186 |
+
def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
|
| 187 |
+
pytest.importorskip("numba")
|
| 188 |
+
data = DataFrame(
|
| 189 |
+
{
|
| 190 |
+
0: ["a", "a", "b", "b", "a"],
|
| 191 |
+
1: [1.0, 2.0, 3.0, 4.0, 5.0],
|
| 192 |
+
2: [1, 2, 3, 4, 5],
|
| 193 |
+
},
|
| 194 |
+
columns=[0, 1, 2],
|
| 195 |
+
)
|
| 196 |
+
grouped = data.groupby(0)
|
| 197 |
+
result = grouped.agg(**agg_kwargs, engine="numba")
|
| 198 |
+
expected = grouped.agg(expected_func, engine="cython")
|
| 199 |
+
# check_dtype can be removed if GH 44952 is addressed
|
| 200 |
+
# Currently, UDFs still always return float64 while reductions can preserve dtype
|
| 201 |
+
tm.assert_frame_equal(result, expected, check_dtype=False)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
@pytest.mark.parametrize(
|
| 205 |
+
"agg_kwargs",
|
| 206 |
+
[{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],
|
| 207 |
+
)
|
| 208 |
+
def test_multifunc_numba_vs_cython_series(agg_kwargs):
|
| 209 |
+
pytest.importorskip("numba")
|
| 210 |
+
labels = ["a", "a", "b", "b", "a"]
|
| 211 |
+
data = Series([1.0, 2.0, 3.0, 4.0, 5.0])
|
| 212 |
+
grouped = data.groupby(labels)
|
| 213 |
+
agg_kwargs["engine"] = "numba"
|
| 214 |
+
result = grouped.agg(**agg_kwargs)
|
| 215 |
+
agg_kwargs["engine"] = "cython"
|
| 216 |
+
expected = grouped.agg(**agg_kwargs)
|
| 217 |
+
if isinstance(expected, DataFrame):
|
| 218 |
+
tm.assert_frame_equal(result, expected)
|
| 219 |
+
else:
|
| 220 |
+
tm.assert_series_equal(result, expected)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@pytest.mark.single_cpu
|
| 224 |
+
@pytest.mark.parametrize(
|
| 225 |
+
"data,agg_kwargs",
|
| 226 |
+
[
|
| 227 |
+
(Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": ["min", "max"]}),
|
| 228 |
+
(Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": "min"}),
|
| 229 |
+
(
|
| 230 |
+
DataFrame(
|
| 231 |
+
{1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
|
| 232 |
+
),
|
| 233 |
+
{"func": ["min", "max"]},
|
| 234 |
+
),
|
| 235 |
+
(
|
| 236 |
+
DataFrame(
|
| 237 |
+
{1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
|
| 238 |
+
),
|
| 239 |
+
{"func": "min"},
|
| 240 |
+
),
|
| 241 |
+
(
|
| 242 |
+
DataFrame(
|
| 243 |
+
{1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
|
| 244 |
+
),
|
| 245 |
+
{"func": {1: ["min", "max"], 2: "sum"}},
|
| 246 |
+
),
|
| 247 |
+
(
|
| 248 |
+
DataFrame(
|
| 249 |
+
{1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
|
| 250 |
+
),
|
| 251 |
+
{"min_col": NamedAgg(column=1, aggfunc="min")},
|
| 252 |
+
),
|
| 253 |
+
],
|
| 254 |
+
)
|
| 255 |
+
def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
|
| 256 |
+
pytest.importorskip("numba")
|
| 257 |
+
labels = ["a", "a", "b", "b", "a"]
|
| 258 |
+
grouped = data.groupby(labels)
|
| 259 |
+
result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})
|
| 260 |
+
expected = grouped.agg(**agg_kwargs, engine="numba")
|
| 261 |
+
if isinstance(expected, DataFrame):
|
| 262 |
+
tm.assert_frame_equal(result, expected)
|
| 263 |
+
else:
|
| 264 |
+
tm.assert_series_equal(result, expected)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def test_args_not_cached():
|
| 268 |
+
# GH 41647
|
| 269 |
+
pytest.importorskip("numba")
|
| 270 |
+
|
| 271 |
+
def sum_last(values, index, n):
|
| 272 |
+
return values[-n:].sum()
|
| 273 |
+
|
| 274 |
+
df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})
|
| 275 |
+
grouped_x = df.groupby("id")["x"]
|
| 276 |
+
result = grouped_x.agg(sum_last, 1, engine="numba")
|
| 277 |
+
expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id"))
|
| 278 |
+
tm.assert_series_equal(result, expected)
|
| 279 |
+
|
| 280 |
+
result = grouped_x.agg(sum_last, 2, engine="numba")
|
| 281 |
+
expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id"))
|
| 282 |
+
tm.assert_series_equal(result, expected)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def test_index_data_correctly_passed():
|
| 286 |
+
# GH 43133
|
| 287 |
+
pytest.importorskip("numba")
|
| 288 |
+
|
| 289 |
+
def f(values, index):
|
| 290 |
+
return np.mean(index)
|
| 291 |
+
|
| 292 |
+
df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3])
|
| 293 |
+
result = df.groupby("group").aggregate(f, engine="numba")
|
| 294 |
+
expected = DataFrame(
|
| 295 |
+
[-1.5, -3.0], columns=["v"], index=Index(["A", "B"], name="group")
|
| 296 |
+
)
|
| 297 |
+
tm.assert_frame_equal(result, expected)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def test_engine_kwargs_not_cached():
|
| 301 |
+
# If the user passes a different set of engine_kwargs don't return the same
|
| 302 |
+
# jitted function
|
| 303 |
+
pytest.importorskip("numba")
|
| 304 |
+
nogil = True
|
| 305 |
+
parallel = False
|
| 306 |
+
nopython = True
|
| 307 |
+
|
| 308 |
+
def func_kwargs(values, index):
|
| 309 |
+
return nogil + parallel + nopython
|
| 310 |
+
|
| 311 |
+
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
|
| 312 |
+
df = DataFrame({"value": [0, 0, 0]})
|
| 313 |
+
result = df.groupby(level=0).aggregate(
|
| 314 |
+
func_kwargs, engine="numba", engine_kwargs=engine_kwargs
|
| 315 |
+
)
|
| 316 |
+
expected = DataFrame({"value": [2.0, 2.0, 2.0]})
|
| 317 |
+
tm.assert_frame_equal(result, expected)
|
| 318 |
+
|
| 319 |
+
nogil = False
|
| 320 |
+
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
|
| 321 |
+
result = df.groupby(level=0).aggregate(
|
| 322 |
+
func_kwargs, engine="numba", engine_kwargs=engine_kwargs
|
| 323 |
+
)
|
| 324 |
+
expected = DataFrame({"value": [1.0, 1.0, 1.0]})
|
| 325 |
+
tm.assert_frame_equal(result, expected)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
@pytest.mark.filterwarnings("ignore")
|
| 329 |
+
def test_multiindex_one_key(nogil, parallel, nopython):
|
| 330 |
+
pytest.importorskip("numba")
|
| 331 |
+
|
| 332 |
+
def numba_func(values, index):
|
| 333 |
+
return 1
|
| 334 |
+
|
| 335 |
+
df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
|
| 336 |
+
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
|
| 337 |
+
result = df.groupby("A").agg(
|
| 338 |
+
numba_func, engine="numba", engine_kwargs=engine_kwargs
|
| 339 |
+
)
|
| 340 |
+
expected = DataFrame([1.0], index=Index([1], name="A"), columns=["C"])
|
| 341 |
+
tm.assert_frame_equal(result, expected)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
|
| 345 |
+
pytest.importorskip("numba")
|
| 346 |
+
|
| 347 |
+
def numba_func(values, index):
|
| 348 |
+
return 1
|
| 349 |
+
|
| 350 |
+
df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
|
| 351 |
+
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
|
| 352 |
+
with pytest.raises(NotImplementedError, match="more than 1 grouping labels"):
|
| 353 |
+
df.groupby(["A", "B"]).agg(
|
| 354 |
+
numba_func, engine="numba", engine_kwargs=engine_kwargs
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def test_multilabel_numba_vs_cython(numba_supported_reductions):
|
| 359 |
+
pytest.importorskip("numba")
|
| 360 |
+
reduction, kwargs = numba_supported_reductions
|
| 361 |
+
df = DataFrame(
|
| 362 |
+
{
|
| 363 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
|
| 364 |
+
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
|
| 365 |
+
"C": np.random.default_rng(2).standard_normal(8),
|
| 366 |
+
"D": np.random.default_rng(2).standard_normal(8),
|
| 367 |
+
}
|
| 368 |
+
)
|
| 369 |
+
gb = df.groupby(["A", "B"])
|
| 370 |
+
res_agg = gb.agg(reduction, engine="numba", **kwargs)
|
| 371 |
+
expected_agg = gb.agg(reduction, engine="cython", **kwargs)
|
| 372 |
+
tm.assert_frame_equal(res_agg, expected_agg)
|
| 373 |
+
# Test that calling the aggregation directly also works
|
| 374 |
+
direct_res = getattr(gb, reduction)(engine="numba", **kwargs)
|
| 375 |
+
direct_expected = getattr(gb, reduction)(engine="cython", **kwargs)
|
| 376 |
+
tm.assert_frame_equal(direct_res, direct_expected)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def test_multilabel_udf_numba_vs_cython():
|
| 380 |
+
pytest.importorskip("numba")
|
| 381 |
+
df = DataFrame(
|
| 382 |
+
{
|
| 383 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
|
| 384 |
+
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
|
| 385 |
+
"C": np.random.default_rng(2).standard_normal(8),
|
| 386 |
+
"D": np.random.default_rng(2).standard_normal(8),
|
| 387 |
+
}
|
| 388 |
+
)
|
| 389 |
+
gb = df.groupby(["A", "B"])
|
| 390 |
+
result = gb.agg(lambda values, index: values.min(), engine="numba")
|
| 391 |
+
expected = gb.agg(lambda x: x.min(), engine="cython")
|
| 392 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py
ADDED
|
@@ -0,0 +1,675 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
test all other .agg behavior
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import datetime as dt
|
| 6 |
+
from functools import partial
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
|
| 11 |
+
from pandas.errors import SpecificationError
|
| 12 |
+
|
| 13 |
+
import pandas as pd
|
| 14 |
+
from pandas import (
|
| 15 |
+
DataFrame,
|
| 16 |
+
Index,
|
| 17 |
+
MultiIndex,
|
| 18 |
+
PeriodIndex,
|
| 19 |
+
Series,
|
| 20 |
+
date_range,
|
| 21 |
+
period_range,
|
| 22 |
+
)
|
| 23 |
+
import pandas._testing as tm
|
| 24 |
+
|
| 25 |
+
from pandas.io.formats.printing import pprint_thing
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def test_agg_partial_failure_raises():
|
| 29 |
+
# GH#43741
|
| 30 |
+
|
| 31 |
+
df = DataFrame(
|
| 32 |
+
{
|
| 33 |
+
"data1": np.random.default_rng(2).standard_normal(5),
|
| 34 |
+
"data2": np.random.default_rng(2).standard_normal(5),
|
| 35 |
+
"key1": ["a", "a", "b", "b", "a"],
|
| 36 |
+
"key2": ["one", "two", "one", "two", "one"],
|
| 37 |
+
}
|
| 38 |
+
)
|
| 39 |
+
grouped = df.groupby("key1")
|
| 40 |
+
|
| 41 |
+
def peak_to_peak(arr):
|
| 42 |
+
return arr.max() - arr.min()
|
| 43 |
+
|
| 44 |
+
with pytest.raises(TypeError, match="unsupported operand type"):
|
| 45 |
+
grouped.agg([peak_to_peak])
|
| 46 |
+
|
| 47 |
+
with pytest.raises(TypeError, match="unsupported operand type"):
|
| 48 |
+
grouped.agg(peak_to_peak)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def test_agg_datetimes_mixed():
|
| 52 |
+
data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
|
| 53 |
+
|
| 54 |
+
df1 = DataFrame(
|
| 55 |
+
{
|
| 56 |
+
"key": [x[0] for x in data],
|
| 57 |
+
"date": [x[1] for x in data],
|
| 58 |
+
"value": [x[2] for x in data],
|
| 59 |
+
}
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
data = [
|
| 63 |
+
[
|
| 64 |
+
row[0],
|
| 65 |
+
(dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
|
| 66 |
+
row[2],
|
| 67 |
+
]
|
| 68 |
+
for row in data
|
| 69 |
+
]
|
| 70 |
+
|
| 71 |
+
df2 = DataFrame(
|
| 72 |
+
{
|
| 73 |
+
"key": [x[0] for x in data],
|
| 74 |
+
"date": [x[1] for x in data],
|
| 75 |
+
"value": [x[2] for x in data],
|
| 76 |
+
}
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
df1["weights"] = df1["value"] / df1["value"].sum()
|
| 80 |
+
gb1 = df1.groupby("date").aggregate("sum")
|
| 81 |
+
|
| 82 |
+
df2["weights"] = df1["value"] / df1["value"].sum()
|
| 83 |
+
gb2 = df2.groupby("date").aggregate("sum")
|
| 84 |
+
|
| 85 |
+
assert len(gb1) == len(gb2)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def test_agg_period_index():
|
| 89 |
+
prng = period_range("2012-1-1", freq="M", periods=3)
|
| 90 |
+
df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)), index=prng)
|
| 91 |
+
rs = df.groupby(level=0).sum()
|
| 92 |
+
assert isinstance(rs.index, PeriodIndex)
|
| 93 |
+
|
| 94 |
+
# GH 3579
|
| 95 |
+
index = period_range(start="1999-01", periods=5, freq="M")
|
| 96 |
+
s1 = Series(np.random.default_rng(2).random(len(index)), index=index)
|
| 97 |
+
s2 = Series(np.random.default_rng(2).random(len(index)), index=index)
|
| 98 |
+
df = DataFrame.from_dict({"s1": s1, "s2": s2})
|
| 99 |
+
grouped = df.groupby(df.index.month)
|
| 100 |
+
list(grouped)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_agg_dict_parameter_cast_result_dtypes():
|
| 104 |
+
# GH 12821
|
| 105 |
+
|
| 106 |
+
df = DataFrame(
|
| 107 |
+
{
|
| 108 |
+
"class": ["A", "A", "B", "B", "C", "C", "D", "D"],
|
| 109 |
+
"time": date_range("1/1/2011", periods=8, freq="h"),
|
| 110 |
+
}
|
| 111 |
+
)
|
| 112 |
+
df.loc[[0, 1, 2, 5], "time"] = None
|
| 113 |
+
|
| 114 |
+
# test for `first` function
|
| 115 |
+
exp = df.loc[[0, 3, 4, 6]].set_index("class")
|
| 116 |
+
grouped = df.groupby("class")
|
| 117 |
+
tm.assert_frame_equal(grouped.first(), exp)
|
| 118 |
+
tm.assert_frame_equal(grouped.agg("first"), exp)
|
| 119 |
+
tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
|
| 120 |
+
tm.assert_series_equal(grouped.time.first(), exp["time"])
|
| 121 |
+
tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
|
| 122 |
+
|
| 123 |
+
# test for `last` function
|
| 124 |
+
exp = df.loc[[0, 3, 4, 7]].set_index("class")
|
| 125 |
+
grouped = df.groupby("class")
|
| 126 |
+
tm.assert_frame_equal(grouped.last(), exp)
|
| 127 |
+
tm.assert_frame_equal(grouped.agg("last"), exp)
|
| 128 |
+
tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
|
| 129 |
+
tm.assert_series_equal(grouped.time.last(), exp["time"])
|
| 130 |
+
tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
|
| 131 |
+
|
| 132 |
+
# count
|
| 133 |
+
exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
|
| 134 |
+
tm.assert_series_equal(grouped.time.agg(len), exp)
|
| 135 |
+
tm.assert_series_equal(grouped.time.size(), exp)
|
| 136 |
+
|
| 137 |
+
exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
|
| 138 |
+
tm.assert_series_equal(grouped.time.count(), exp)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def test_agg_cast_results_dtypes():
|
| 142 |
+
# similar to GH12821
|
| 143 |
+
# xref #11444
|
| 144 |
+
u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
|
| 145 |
+
v = list("aaabbbbbbccd")
|
| 146 |
+
df = DataFrame({"X": v, "Y": u})
|
| 147 |
+
|
| 148 |
+
result = df.groupby("X")["Y"].agg(len)
|
| 149 |
+
expected = df.groupby("X")["Y"].count()
|
| 150 |
+
tm.assert_series_equal(result, expected)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def test_aggregate_float64_no_int64():
|
| 154 |
+
# see gh-11199
|
| 155 |
+
df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
|
| 156 |
+
|
| 157 |
+
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
|
| 158 |
+
expected.index.name = "b"
|
| 159 |
+
|
| 160 |
+
result = df.groupby("b")[["a"]].mean()
|
| 161 |
+
tm.assert_frame_equal(result, expected)
|
| 162 |
+
|
| 163 |
+
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
|
| 164 |
+
expected.index.name = "b"
|
| 165 |
+
|
| 166 |
+
result = df.groupby("b")[["a", "c"]].mean()
|
| 167 |
+
tm.assert_frame_equal(result, expected)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def test_aggregate_api_consistency():
|
| 171 |
+
# GH 9052
|
| 172 |
+
# make sure that the aggregates via dict
|
| 173 |
+
# are consistent
|
| 174 |
+
df = DataFrame(
|
| 175 |
+
{
|
| 176 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
|
| 177 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 178 |
+
"C": np.random.default_rng(2).standard_normal(8) + 1.0,
|
| 179 |
+
"D": np.arange(8),
|
| 180 |
+
}
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
grouped = df.groupby(["A", "B"])
|
| 184 |
+
c_mean = grouped["C"].mean()
|
| 185 |
+
c_sum = grouped["C"].sum()
|
| 186 |
+
d_mean = grouped["D"].mean()
|
| 187 |
+
d_sum = grouped["D"].sum()
|
| 188 |
+
|
| 189 |
+
result = grouped["D"].agg(["sum", "mean"])
|
| 190 |
+
expected = pd.concat([d_sum, d_mean], axis=1)
|
| 191 |
+
expected.columns = ["sum", "mean"]
|
| 192 |
+
tm.assert_frame_equal(result, expected, check_like=True)
|
| 193 |
+
|
| 194 |
+
result = grouped.agg(["sum", "mean"])
|
| 195 |
+
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
|
| 196 |
+
expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
|
| 197 |
+
tm.assert_frame_equal(result, expected, check_like=True)
|
| 198 |
+
|
| 199 |
+
result = grouped[["D", "C"]].agg(["sum", "mean"])
|
| 200 |
+
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
|
| 201 |
+
expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
|
| 202 |
+
tm.assert_frame_equal(result, expected, check_like=True)
|
| 203 |
+
|
| 204 |
+
result = grouped.agg({"C": "mean", "D": "sum"})
|
| 205 |
+
expected = pd.concat([d_sum, c_mean], axis=1)
|
| 206 |
+
tm.assert_frame_equal(result, expected, check_like=True)
|
| 207 |
+
|
| 208 |
+
result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
|
| 209 |
+
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
|
| 210 |
+
expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
|
| 211 |
+
|
| 212 |
+
msg = r"Column\(s\) \['r', 'r2'\] do not exist"
|
| 213 |
+
with pytest.raises(KeyError, match=msg):
|
| 214 |
+
grouped[["D", "C"]].agg({"r": "sum", "r2": "mean"})
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def test_agg_dict_renaming_deprecation():
|
| 218 |
+
# 15931
|
| 219 |
+
df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
|
| 220 |
+
|
| 221 |
+
msg = r"nested renamer is not supported"
|
| 222 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 223 |
+
df.groupby("A").agg(
|
| 224 |
+
{"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
msg = r"Column\(s\) \['ma'\] do not exist"
|
| 228 |
+
with pytest.raises(KeyError, match=msg):
|
| 229 |
+
df.groupby("A")[["B", "C"]].agg({"ma": "max"})
|
| 230 |
+
|
| 231 |
+
msg = r"nested renamer is not supported"
|
| 232 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 233 |
+
df.groupby("A").B.agg({"foo": "count"})
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def test_agg_compat():
|
| 237 |
+
# GH 12334
|
| 238 |
+
df = DataFrame(
|
| 239 |
+
{
|
| 240 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
|
| 241 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 242 |
+
"C": np.random.default_rng(2).standard_normal(8) + 1.0,
|
| 243 |
+
"D": np.arange(8),
|
| 244 |
+
}
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
g = df.groupby(["A", "B"])
|
| 248 |
+
|
| 249 |
+
msg = r"nested renamer is not supported"
|
| 250 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 251 |
+
g["D"].agg({"C": ["sum", "std"]})
|
| 252 |
+
|
| 253 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 254 |
+
g["D"].agg({"C": "sum", "D": "std"})
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def test_agg_nested_dicts():
|
| 258 |
+
# API change for disallowing these types of nested dicts
|
| 259 |
+
df = DataFrame(
|
| 260 |
+
{
|
| 261 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
|
| 262 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 263 |
+
"C": np.random.default_rng(2).standard_normal(8) + 1.0,
|
| 264 |
+
"D": np.arange(8),
|
| 265 |
+
}
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
g = df.groupby(["A", "B"])
|
| 269 |
+
|
| 270 |
+
msg = r"nested renamer is not supported"
|
| 271 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 272 |
+
g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
|
| 273 |
+
|
| 274 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 275 |
+
g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
|
| 276 |
+
|
| 277 |
+
# same name as the original column
|
| 278 |
+
# GH9052
|
| 279 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 280 |
+
g["D"].agg({"result1": np.sum, "result2": np.mean})
|
| 281 |
+
|
| 282 |
+
with pytest.raises(SpecificationError, match=msg):
|
| 283 |
+
g["D"].agg({"D": np.sum, "result2": np.mean})
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def test_agg_item_by_item_raise_typeerror():
|
| 287 |
+
df = DataFrame(np.random.default_rng(2).integers(10, size=(20, 10)))
|
| 288 |
+
|
| 289 |
+
def raiseException(df):
|
| 290 |
+
pprint_thing("----------------------------------------")
|
| 291 |
+
pprint_thing(df.to_string())
|
| 292 |
+
raise TypeError("test")
|
| 293 |
+
|
| 294 |
+
with pytest.raises(TypeError, match="test"):
|
| 295 |
+
df.groupby(0).agg(raiseException)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def test_series_agg_multikey():
|
| 299 |
+
ts = Series(
|
| 300 |
+
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
|
| 301 |
+
)
|
| 302 |
+
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
|
| 303 |
+
|
| 304 |
+
result = grouped.agg("sum")
|
| 305 |
+
expected = grouped.sum()
|
| 306 |
+
tm.assert_series_equal(result, expected)
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def test_series_agg_multi_pure_python():
|
| 310 |
+
data = DataFrame(
|
| 311 |
+
{
|
| 312 |
+
"A": [
|
| 313 |
+
"foo",
|
| 314 |
+
"foo",
|
| 315 |
+
"foo",
|
| 316 |
+
"foo",
|
| 317 |
+
"bar",
|
| 318 |
+
"bar",
|
| 319 |
+
"bar",
|
| 320 |
+
"bar",
|
| 321 |
+
"foo",
|
| 322 |
+
"foo",
|
| 323 |
+
"foo",
|
| 324 |
+
],
|
| 325 |
+
"B": [
|
| 326 |
+
"one",
|
| 327 |
+
"one",
|
| 328 |
+
"one",
|
| 329 |
+
"two",
|
| 330 |
+
"one",
|
| 331 |
+
"one",
|
| 332 |
+
"one",
|
| 333 |
+
"two",
|
| 334 |
+
"two",
|
| 335 |
+
"two",
|
| 336 |
+
"one",
|
| 337 |
+
],
|
| 338 |
+
"C": [
|
| 339 |
+
"dull",
|
| 340 |
+
"dull",
|
| 341 |
+
"shiny",
|
| 342 |
+
"dull",
|
| 343 |
+
"dull",
|
| 344 |
+
"shiny",
|
| 345 |
+
"shiny",
|
| 346 |
+
"dull",
|
| 347 |
+
"shiny",
|
| 348 |
+
"shiny",
|
| 349 |
+
"shiny",
|
| 350 |
+
],
|
| 351 |
+
"D": np.random.default_rng(2).standard_normal(11),
|
| 352 |
+
"E": np.random.default_rng(2).standard_normal(11),
|
| 353 |
+
"F": np.random.default_rng(2).standard_normal(11),
|
| 354 |
+
}
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
def bad(x):
|
| 358 |
+
assert len(x.values.base) > 0
|
| 359 |
+
return "foo"
|
| 360 |
+
|
| 361 |
+
result = data.groupby(["A", "B"]).agg(bad)
|
| 362 |
+
expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
|
| 363 |
+
tm.assert_frame_equal(result, expected)
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def test_agg_consistency():
|
| 367 |
+
# agg with ([]) and () not consistent
|
| 368 |
+
# GH 6715
|
| 369 |
+
def P1(a):
|
| 370 |
+
return np.percentile(a.dropna(), q=1)
|
| 371 |
+
|
| 372 |
+
df = DataFrame(
|
| 373 |
+
{
|
| 374 |
+
"col1": [1, 2, 3, 4],
|
| 375 |
+
"col2": [10, 25, 26, 31],
|
| 376 |
+
"date": [
|
| 377 |
+
dt.date(2013, 2, 10),
|
| 378 |
+
dt.date(2013, 2, 10),
|
| 379 |
+
dt.date(2013, 2, 11),
|
| 380 |
+
dt.date(2013, 2, 11),
|
| 381 |
+
],
|
| 382 |
+
}
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
g = df.groupby("date")
|
| 386 |
+
|
| 387 |
+
expected = g.agg([P1])
|
| 388 |
+
expected.columns = expected.columns.levels[0]
|
| 389 |
+
|
| 390 |
+
result = g.agg(P1)
|
| 391 |
+
tm.assert_frame_equal(result, expected)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def test_agg_callables():
|
| 395 |
+
# GH 7929
|
| 396 |
+
df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
|
| 397 |
+
|
| 398 |
+
class fn_class:
|
| 399 |
+
def __call__(self, x):
|
| 400 |
+
return sum(x)
|
| 401 |
+
|
| 402 |
+
equiv_callables = [
|
| 403 |
+
sum,
|
| 404 |
+
np.sum,
|
| 405 |
+
lambda x: sum(x),
|
| 406 |
+
lambda x: x.sum(),
|
| 407 |
+
partial(sum),
|
| 408 |
+
fn_class(),
|
| 409 |
+
]
|
| 410 |
+
|
| 411 |
+
expected = df.groupby("foo").agg("sum")
|
| 412 |
+
for ecall in equiv_callables:
|
| 413 |
+
warn = FutureWarning if ecall is sum or ecall is np.sum else None
|
| 414 |
+
msg = "using DataFrameGroupBy.sum"
|
| 415 |
+
with tm.assert_produces_warning(warn, match=msg):
|
| 416 |
+
result = df.groupby("foo").agg(ecall)
|
| 417 |
+
tm.assert_frame_equal(result, expected)
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def test_agg_over_numpy_arrays():
|
| 421 |
+
# GH 3788
|
| 422 |
+
df = DataFrame(
|
| 423 |
+
[
|
| 424 |
+
[1, np.array([10, 20, 30])],
|
| 425 |
+
[1, np.array([40, 50, 60])],
|
| 426 |
+
[2, np.array([20, 30, 40])],
|
| 427 |
+
],
|
| 428 |
+
columns=["category", "arraydata"],
|
| 429 |
+
)
|
| 430 |
+
gb = df.groupby("category")
|
| 431 |
+
|
| 432 |
+
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
|
| 433 |
+
expected_index = Index([1, 2], name="category")
|
| 434 |
+
expected_column = ["arraydata"]
|
| 435 |
+
expected = DataFrame(expected_data, index=expected_index, columns=expected_column)
|
| 436 |
+
|
| 437 |
+
alt = gb.sum(numeric_only=False)
|
| 438 |
+
tm.assert_frame_equal(alt, expected)
|
| 439 |
+
|
| 440 |
+
result = gb.agg("sum", numeric_only=False)
|
| 441 |
+
tm.assert_frame_equal(result, expected)
|
| 442 |
+
|
| 443 |
+
# FIXME: the original version of this test called `gb.agg(sum)`
|
| 444 |
+
# and that raises TypeError if `numeric_only=False` is passed
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
@pytest.mark.parametrize("as_period", [True, False])
|
| 448 |
+
def test_agg_tzaware_non_datetime_result(as_period):
|
| 449 |
+
# discussed in GH#29589, fixed in GH#29641, operating on tzaware values
|
| 450 |
+
# with function that is not dtype-preserving
|
| 451 |
+
dti = date_range("2012-01-01", periods=4, tz="UTC")
|
| 452 |
+
if as_period:
|
| 453 |
+
dti = dti.tz_localize(None).to_period("D")
|
| 454 |
+
|
| 455 |
+
df = DataFrame({"a": [0, 0, 1, 1], "b": dti})
|
| 456 |
+
gb = df.groupby("a")
|
| 457 |
+
|
| 458 |
+
# Case that _does_ preserve the dtype
|
| 459 |
+
result = gb["b"].agg(lambda x: x.iloc[0])
|
| 460 |
+
expected = Series(dti[::2], name="b")
|
| 461 |
+
expected.index.name = "a"
|
| 462 |
+
tm.assert_series_equal(result, expected)
|
| 463 |
+
|
| 464 |
+
# Cases that do _not_ preserve the dtype
|
| 465 |
+
result = gb["b"].agg(lambda x: x.iloc[0].year)
|
| 466 |
+
expected = Series([2012, 2012], name="b")
|
| 467 |
+
expected.index.name = "a"
|
| 468 |
+
tm.assert_series_equal(result, expected)
|
| 469 |
+
|
| 470 |
+
result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])
|
| 471 |
+
expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
|
| 472 |
+
expected.index.name = "a"
|
| 473 |
+
if as_period:
|
| 474 |
+
expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b")
|
| 475 |
+
expected.index.name = "a"
|
| 476 |
+
tm.assert_series_equal(result, expected)
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def test_agg_timezone_round_trip():
|
| 480 |
+
# GH 15426
|
| 481 |
+
ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
|
| 482 |
+
df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})
|
| 483 |
+
|
| 484 |
+
result1 = df.groupby("a")["b"].agg("min").iloc[0]
|
| 485 |
+
result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
|
| 486 |
+
result3 = df.groupby("a")["b"].min().iloc[0]
|
| 487 |
+
|
| 488 |
+
assert result1 == ts
|
| 489 |
+
assert result2 == ts
|
| 490 |
+
assert result3 == ts
|
| 491 |
+
|
| 492 |
+
dates = [
|
| 493 |
+
pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)
|
| 494 |
+
]
|
| 495 |
+
df = DataFrame({"A": ["a", "b"] * 2, "B": dates})
|
| 496 |
+
grouped = df.groupby("A")
|
| 497 |
+
|
| 498 |
+
ts = df["B"].iloc[0]
|
| 499 |
+
assert ts == grouped.nth(0)["B"].iloc[0]
|
| 500 |
+
assert ts == grouped.head(1)["B"].iloc[0]
|
| 501 |
+
assert ts == grouped.first()["B"].iloc[0]
|
| 502 |
+
|
| 503 |
+
# GH#27110 applying iloc should return a DataFrame
|
| 504 |
+
msg = "DataFrameGroupBy.apply operated on the grouping columns"
|
| 505 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
| 506 |
+
assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]
|
| 507 |
+
|
| 508 |
+
ts = df["B"].iloc[2]
|
| 509 |
+
assert ts == grouped.last()["B"].iloc[0]
|
| 510 |
+
|
| 511 |
+
# GH#27110 applying iloc should return a DataFrame
|
| 512 |
+
msg = "DataFrameGroupBy.apply operated on the grouping columns"
|
| 513 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
| 514 |
+
assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def test_sum_uint64_overflow():
|
| 518 |
+
# see gh-14758
|
| 519 |
+
# Convert to uint64 and don't overflow
|
| 520 |
+
df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
|
| 521 |
+
df = df + 9223372036854775807
|
| 522 |
+
|
| 523 |
+
index = Index(
|
| 524 |
+
[9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
|
| 525 |
+
)
|
| 526 |
+
expected = DataFrame(
|
| 527 |
+
{1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
|
| 528 |
+
index=index,
|
| 529 |
+
dtype=object,
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
expected.index.name = 0
|
| 533 |
+
result = df.groupby(0).sum(numeric_only=False)
|
| 534 |
+
tm.assert_frame_equal(result, expected)
|
| 535 |
+
|
| 536 |
+
# out column is non-numeric, so with numeric_only=True it is dropped
|
| 537 |
+
result2 = df.groupby(0).sum(numeric_only=True)
|
| 538 |
+
expected2 = expected[[]]
|
| 539 |
+
tm.assert_frame_equal(result2, expected2)
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
@pytest.mark.parametrize(
|
| 543 |
+
"structure, expected",
|
| 544 |
+
[
|
| 545 |
+
(tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
|
| 546 |
+
(list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
|
| 547 |
+
(
|
| 548 |
+
lambda x: tuple(x),
|
| 549 |
+
DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
|
| 550 |
+
),
|
| 551 |
+
(
|
| 552 |
+
lambda x: list(x),
|
| 553 |
+
DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
|
| 554 |
+
),
|
| 555 |
+
],
|
| 556 |
+
)
|
| 557 |
+
def test_agg_structs_dataframe(structure, expected):
|
| 558 |
+
df = DataFrame(
|
| 559 |
+
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
|
| 560 |
+
)
|
| 561 |
+
|
| 562 |
+
result = df.groupby(["A", "B"]).aggregate(structure)
|
| 563 |
+
expected.index.names = ["A", "B"]
|
| 564 |
+
tm.assert_frame_equal(result, expected)
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
@pytest.mark.parametrize(
|
| 568 |
+
"structure, expected",
|
| 569 |
+
[
|
| 570 |
+
(tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
|
| 571 |
+
(list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
|
| 572 |
+
(lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
|
| 573 |
+
(lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
|
| 574 |
+
],
|
| 575 |
+
)
|
| 576 |
+
def test_agg_structs_series(structure, expected):
|
| 577 |
+
# Issue #18079
|
| 578 |
+
df = DataFrame(
|
| 579 |
+
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
|
| 580 |
+
)
|
| 581 |
+
|
| 582 |
+
result = df.groupby("A")["C"].aggregate(structure)
|
| 583 |
+
expected.index.name = "A"
|
| 584 |
+
tm.assert_series_equal(result, expected)
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def test_agg_category_nansum(observed):
|
| 588 |
+
categories = ["a", "b", "c"]
|
| 589 |
+
df = DataFrame(
|
| 590 |
+
{"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
|
| 591 |
+
)
|
| 592 |
+
msg = "using SeriesGroupBy.sum"
|
| 593 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 594 |
+
result = df.groupby("A", observed=observed).B.agg(np.nansum)
|
| 595 |
+
expected = Series(
|
| 596 |
+
[3, 3, 0],
|
| 597 |
+
index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
|
| 598 |
+
name="B",
|
| 599 |
+
)
|
| 600 |
+
if observed:
|
| 601 |
+
expected = expected[expected != 0]
|
| 602 |
+
tm.assert_series_equal(result, expected)
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
def test_agg_list_like_func():
|
| 606 |
+
# GH 18473
|
| 607 |
+
df = DataFrame({"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]})
|
| 608 |
+
grouped = df.groupby("A", as_index=False, sort=False)
|
| 609 |
+
result = grouped.agg({"B": lambda x: list(x)})
|
| 610 |
+
expected = DataFrame(
|
| 611 |
+
{"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
|
| 612 |
+
)
|
| 613 |
+
tm.assert_frame_equal(result, expected)
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
def test_agg_lambda_with_timezone():
|
| 617 |
+
# GH 23683
|
| 618 |
+
df = DataFrame(
|
| 619 |
+
{
|
| 620 |
+
"tag": [1, 1],
|
| 621 |
+
"date": [
|
| 622 |
+
pd.Timestamp("2018-01-01", tz="UTC"),
|
| 623 |
+
pd.Timestamp("2018-01-02", tz="UTC"),
|
| 624 |
+
],
|
| 625 |
+
}
|
| 626 |
+
)
|
| 627 |
+
result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
|
| 628 |
+
expected = DataFrame(
|
| 629 |
+
[pd.Timestamp("2018-01-01", tz="UTC")],
|
| 630 |
+
index=Index([1], name="tag"),
|
| 631 |
+
columns=["date"],
|
| 632 |
+
)
|
| 633 |
+
tm.assert_frame_equal(result, expected)
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
@pytest.mark.parametrize(
|
| 637 |
+
"err_cls",
|
| 638 |
+
[
|
| 639 |
+
NotImplementedError,
|
| 640 |
+
RuntimeError,
|
| 641 |
+
KeyError,
|
| 642 |
+
IndexError,
|
| 643 |
+
OSError,
|
| 644 |
+
ValueError,
|
| 645 |
+
ArithmeticError,
|
| 646 |
+
AttributeError,
|
| 647 |
+
],
|
| 648 |
+
)
|
| 649 |
+
def test_groupby_agg_err_catching(err_cls):
|
| 650 |
+
# make sure we suppress anything other than TypeError or AssertionError
|
| 651 |
+
# in _python_agg_general
|
| 652 |
+
|
| 653 |
+
# Use a non-standard EA to make sure we don't go down ndarray paths
|
| 654 |
+
from pandas.tests.extension.decimal.array import (
|
| 655 |
+
DecimalArray,
|
| 656 |
+
make_data,
|
| 657 |
+
to_decimal,
|
| 658 |
+
)
|
| 659 |
+
|
| 660 |
+
data = make_data()[:5]
|
| 661 |
+
df = DataFrame(
|
| 662 |
+
{"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
|
| 663 |
+
)
|
| 664 |
+
|
| 665 |
+
expected = Series(to_decimal([data[0], data[3]]))
|
| 666 |
+
|
| 667 |
+
def weird_func(x):
|
| 668 |
+
# weird function that raise something other than TypeError or IndexError
|
| 669 |
+
# in _python_agg_general
|
| 670 |
+
if len(x) == 0:
|
| 671 |
+
raise err_cls
|
| 672 |
+
return x.iloc[0]
|
| 673 |
+
|
| 674 |
+
result = df["decimals"].groupby(df["id1"]).agg(weird_func)
|
| 675 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (184 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_corrwith.cpython-310.pyc
ADDED
|
Binary file (986 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_describe.cpython-310.pyc
ADDED
|
Binary file (8.91 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_groupby_shift_diff.cpython-310.pyc
ADDED
|
Binary file (7.64 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_is_monotonic.cpython-310.pyc
ADDED
|
Binary file (1.87 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nlargest_nsmallest.cpython-310.pyc
ADDED
|
Binary file (2.91 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_nth.cpython-310.pyc
ADDED
|
Binary file (21.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_quantile.cpython-310.pyc
ADDED
|
Binary file (14.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_rank.cpython-310.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_sample.cpython-310.pyc
ADDED
|
Binary file (4.91 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_size.cpython-310.pyc
ADDED
|
Binary file (4.94 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_skew.cpython-310.pyc
ADDED
|
Binary file (1.05 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/__pycache__/test_value_counts.cpython-310.pyc
ADDED
|
Binary file (25.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_corrwith.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
DataFrame,
|
| 5 |
+
Index,
|
| 6 |
+
Series,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def test_corrwith_with_1_axis():
|
| 12 |
+
# GH 47723
|
| 13 |
+
df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]})
|
| 14 |
+
gb = df.groupby("a")
|
| 15 |
+
|
| 16 |
+
msg = "DataFrameGroupBy.corrwith with axis=1 is deprecated"
|
| 17 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 18 |
+
result = gb.corrwith(df, axis=1)
|
| 19 |
+
index = Index(
|
| 20 |
+
data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)],
|
| 21 |
+
name=("a", None),
|
| 22 |
+
)
|
| 23 |
+
expected = Series([np.nan] * 6, index=index)
|
| 24 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_describe.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
MultiIndex,
|
| 9 |
+
Series,
|
| 10 |
+
Timestamp,
|
| 11 |
+
date_range,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def test_apply_describe_bug(multiindex_dataframe_random_data):
|
| 17 |
+
grouped = multiindex_dataframe_random_data.groupby(level="first")
|
| 18 |
+
grouped.describe() # it works!
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def test_series_describe_multikey():
|
| 22 |
+
ts = Series(
|
| 23 |
+
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
|
| 24 |
+
)
|
| 25 |
+
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
|
| 26 |
+
result = grouped.describe()
|
| 27 |
+
tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False)
|
| 28 |
+
tm.assert_series_equal(result["std"], grouped.std(), check_names=False)
|
| 29 |
+
tm.assert_series_equal(result["min"], grouped.min(), check_names=False)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def test_series_describe_single():
|
| 33 |
+
ts = Series(
|
| 34 |
+
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
|
| 35 |
+
)
|
| 36 |
+
grouped = ts.groupby(lambda x: x.month)
|
| 37 |
+
result = grouped.apply(lambda x: x.describe())
|
| 38 |
+
expected = grouped.describe().stack(future_stack=True)
|
| 39 |
+
tm.assert_series_equal(result, expected)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@pytest.mark.parametrize("keys", ["key1", ["key1", "key2"]])
|
| 43 |
+
def test_series_describe_as_index(as_index, keys):
|
| 44 |
+
# GH#49256
|
| 45 |
+
df = DataFrame(
|
| 46 |
+
{
|
| 47 |
+
"key1": ["one", "two", "two", "three", "two"],
|
| 48 |
+
"key2": ["one", "two", "two", "three", "two"],
|
| 49 |
+
"foo2": [1, 2, 4, 4, 6],
|
| 50 |
+
}
|
| 51 |
+
)
|
| 52 |
+
gb = df.groupby(keys, as_index=as_index)["foo2"]
|
| 53 |
+
result = gb.describe()
|
| 54 |
+
expected = DataFrame(
|
| 55 |
+
{
|
| 56 |
+
"key1": ["one", "three", "two"],
|
| 57 |
+
"count": [1.0, 1.0, 3.0],
|
| 58 |
+
"mean": [1.0, 4.0, 4.0],
|
| 59 |
+
"std": [np.nan, np.nan, 2.0],
|
| 60 |
+
"min": [1.0, 4.0, 2.0],
|
| 61 |
+
"25%": [1.0, 4.0, 3.0],
|
| 62 |
+
"50%": [1.0, 4.0, 4.0],
|
| 63 |
+
"75%": [1.0, 4.0, 5.0],
|
| 64 |
+
"max": [1.0, 4.0, 6.0],
|
| 65 |
+
}
|
| 66 |
+
)
|
| 67 |
+
if len(keys) == 2:
|
| 68 |
+
expected.insert(1, "key2", expected["key1"])
|
| 69 |
+
if as_index:
|
| 70 |
+
expected = expected.set_index(keys)
|
| 71 |
+
tm.assert_frame_equal(result, expected)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def test_frame_describe_multikey(tsframe):
|
| 75 |
+
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
|
| 76 |
+
result = grouped.describe()
|
| 77 |
+
desc_groups = []
|
| 78 |
+
for col in tsframe:
|
| 79 |
+
group = grouped[col].describe()
|
| 80 |
+
# GH 17464 - Remove duplicate MultiIndex levels
|
| 81 |
+
group_col = MultiIndex(
|
| 82 |
+
levels=[[col], group.columns],
|
| 83 |
+
codes=[[0] * len(group.columns), range(len(group.columns))],
|
| 84 |
+
)
|
| 85 |
+
group = DataFrame(group.values, columns=group_col, index=group.index)
|
| 86 |
+
desc_groups.append(group)
|
| 87 |
+
expected = pd.concat(desc_groups, axis=1)
|
| 88 |
+
tm.assert_frame_equal(result, expected)
|
| 89 |
+
|
| 90 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 91 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 92 |
+
groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
|
| 93 |
+
result = groupedT.describe()
|
| 94 |
+
expected = tsframe.describe().T
|
| 95 |
+
# reverting the change from https://github.com/pandas-dev/pandas/pull/35441/
|
| 96 |
+
expected.index = MultiIndex(
|
| 97 |
+
levels=[[0, 1], expected.index],
|
| 98 |
+
codes=[[0, 0, 1, 1], range(len(expected.index))],
|
| 99 |
+
)
|
| 100 |
+
tm.assert_frame_equal(result, expected)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_frame_describe_tupleindex():
|
| 104 |
+
# GH 14848 - regression from 0.19.0 to 0.19.1
|
| 105 |
+
df1 = DataFrame(
|
| 106 |
+
{
|
| 107 |
+
"x": [1, 2, 3, 4, 5] * 3,
|
| 108 |
+
"y": [10, 20, 30, 40, 50] * 3,
|
| 109 |
+
"z": [100, 200, 300, 400, 500] * 3,
|
| 110 |
+
}
|
| 111 |
+
)
|
| 112 |
+
df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
|
| 113 |
+
df2 = df1.rename(columns={"k": "key"})
|
| 114 |
+
msg = "Names should be list-like for a MultiIndex"
|
| 115 |
+
with pytest.raises(ValueError, match=msg):
|
| 116 |
+
df1.groupby("k").describe()
|
| 117 |
+
with pytest.raises(ValueError, match=msg):
|
| 118 |
+
df2.groupby("key").describe()
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def test_frame_describe_unstacked_format():
|
| 122 |
+
# GH 4792
|
| 123 |
+
prices = {
|
| 124 |
+
Timestamp("2011-01-06 10:59:05", tz=None): 24990,
|
| 125 |
+
Timestamp("2011-01-06 12:43:33", tz=None): 25499,
|
| 126 |
+
Timestamp("2011-01-06 12:54:09", tz=None): 25499,
|
| 127 |
+
}
|
| 128 |
+
volumes = {
|
| 129 |
+
Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
|
| 130 |
+
Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
|
| 131 |
+
Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
|
| 132 |
+
}
|
| 133 |
+
df = DataFrame({"PRICE": prices, "VOLUME": volumes})
|
| 134 |
+
result = df.groupby("PRICE").VOLUME.describe()
|
| 135 |
+
data = [
|
| 136 |
+
df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
|
| 137 |
+
df[df.PRICE == 25499].VOLUME.describe().values.tolist(),
|
| 138 |
+
]
|
| 139 |
+
expected = DataFrame(
|
| 140 |
+
data,
|
| 141 |
+
index=Index([24990, 25499], name="PRICE"),
|
| 142 |
+
columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
|
| 143 |
+
)
|
| 144 |
+
tm.assert_frame_equal(result, expected)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@pytest.mark.filterwarnings(
|
| 148 |
+
"ignore:"
|
| 149 |
+
"indexing past lexsort depth may impact performance:"
|
| 150 |
+
"pandas.errors.PerformanceWarning"
|
| 151 |
+
)
|
| 152 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 153 |
+
@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
|
| 154 |
+
def test_describe_with_duplicate_output_column_names(as_index, keys):
|
| 155 |
+
# GH 35314
|
| 156 |
+
df = DataFrame(
|
| 157 |
+
{
|
| 158 |
+
"a1": [99, 99, 99, 88, 88, 88],
|
| 159 |
+
"a2": [99, 99, 99, 88, 88, 88],
|
| 160 |
+
"b": [1, 2, 3, 4, 5, 6],
|
| 161 |
+
"c": [10, 20, 30, 40, 50, 60],
|
| 162 |
+
},
|
| 163 |
+
columns=["a1", "a2", "b", "b"],
|
| 164 |
+
copy=False,
|
| 165 |
+
)
|
| 166 |
+
if keys == ["a1"]:
|
| 167 |
+
df = df.drop(columns="a2")
|
| 168 |
+
|
| 169 |
+
expected = (
|
| 170 |
+
DataFrame.from_records(
|
| 171 |
+
[
|
| 172 |
+
("b", "count", 3.0, 3.0),
|
| 173 |
+
("b", "mean", 5.0, 2.0),
|
| 174 |
+
("b", "std", 1.0, 1.0),
|
| 175 |
+
("b", "min", 4.0, 1.0),
|
| 176 |
+
("b", "25%", 4.5, 1.5),
|
| 177 |
+
("b", "50%", 5.0, 2.0),
|
| 178 |
+
("b", "75%", 5.5, 2.5),
|
| 179 |
+
("b", "max", 6.0, 3.0),
|
| 180 |
+
("b", "count", 3.0, 3.0),
|
| 181 |
+
("b", "mean", 5.0, 2.0),
|
| 182 |
+
("b", "std", 1.0, 1.0),
|
| 183 |
+
("b", "min", 4.0, 1.0),
|
| 184 |
+
("b", "25%", 4.5, 1.5),
|
| 185 |
+
("b", "50%", 5.0, 2.0),
|
| 186 |
+
("b", "75%", 5.5, 2.5),
|
| 187 |
+
("b", "max", 6.0, 3.0),
|
| 188 |
+
],
|
| 189 |
+
)
|
| 190 |
+
.set_index([0, 1])
|
| 191 |
+
.T
|
| 192 |
+
)
|
| 193 |
+
expected.columns.names = [None, None]
|
| 194 |
+
if len(keys) == 2:
|
| 195 |
+
expected.index = MultiIndex(
|
| 196 |
+
levels=[[88, 99], [88, 99]], codes=[[0, 1], [0, 1]], names=["a1", "a2"]
|
| 197 |
+
)
|
| 198 |
+
else:
|
| 199 |
+
expected.index = Index([88, 99], name="a1")
|
| 200 |
+
|
| 201 |
+
if not as_index:
|
| 202 |
+
expected = expected.reset_index()
|
| 203 |
+
|
| 204 |
+
result = df.groupby(keys, as_index=as_index).describe()
|
| 205 |
+
|
| 206 |
+
tm.assert_frame_equal(result, expected)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def test_describe_duplicate_columns():
|
| 210 |
+
# GH#50806
|
| 211 |
+
df = DataFrame([[0, 1, 2, 3]])
|
| 212 |
+
df.columns = [0, 1, 2, 0]
|
| 213 |
+
gb = df.groupby(df[1])
|
| 214 |
+
result = gb.describe(percentiles=[])
|
| 215 |
+
|
| 216 |
+
columns = ["count", "mean", "std", "min", "50%", "max"]
|
| 217 |
+
frames = [
|
| 218 |
+
DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns)
|
| 219 |
+
for val in (0.0, 2.0, 3.0)
|
| 220 |
+
]
|
| 221 |
+
expected = pd.concat(frames, axis=1)
|
| 222 |
+
expected.columns = MultiIndex(
|
| 223 |
+
levels=[[0, 2], columns],
|
| 224 |
+
codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))],
|
| 225 |
+
)
|
| 226 |
+
expected.index.names = [1]
|
| 227 |
+
tm.assert_frame_equal(result, expected)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class TestGroupByNonCythonPaths:
|
| 231 |
+
# GH#5610 non-cython calls should not include the grouper
|
| 232 |
+
# Tests for code not expected to go through cython paths.
|
| 233 |
+
|
| 234 |
+
@pytest.fixture
|
| 235 |
+
def df(self):
|
| 236 |
+
df = DataFrame(
|
| 237 |
+
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
|
| 238 |
+
columns=["A", "B", "C"],
|
| 239 |
+
)
|
| 240 |
+
return df
|
| 241 |
+
|
| 242 |
+
@pytest.fixture
|
| 243 |
+
def gb(self, df):
|
| 244 |
+
gb = df.groupby("A")
|
| 245 |
+
return gb
|
| 246 |
+
|
| 247 |
+
@pytest.fixture
|
| 248 |
+
def gni(self, df):
|
| 249 |
+
gni = df.groupby("A", as_index=False)
|
| 250 |
+
return gni
|
| 251 |
+
|
| 252 |
+
def test_describe(self, df, gb, gni):
|
| 253 |
+
# describe
|
| 254 |
+
expected_index = Index([1, 3], name="A")
|
| 255 |
+
expected_col = MultiIndex(
|
| 256 |
+
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
|
| 257 |
+
codes=[[0] * 8, list(range(8))],
|
| 258 |
+
)
|
| 259 |
+
expected = DataFrame(
|
| 260 |
+
[
|
| 261 |
+
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
|
| 262 |
+
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
|
| 263 |
+
],
|
| 264 |
+
index=expected_index,
|
| 265 |
+
columns=expected_col,
|
| 266 |
+
)
|
| 267 |
+
result = gb.describe()
|
| 268 |
+
tm.assert_frame_equal(result, expected)
|
| 269 |
+
|
| 270 |
+
expected = expected.reset_index()
|
| 271 |
+
result = gni.describe()
|
| 272 |
+
tm.assert_frame_equal(result, expected)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
@pytest.mark.parametrize("dtype", [int, float, object])
|
| 276 |
+
@pytest.mark.parametrize(
|
| 277 |
+
"kwargs",
|
| 278 |
+
[
|
| 279 |
+
{"percentiles": [0.10, 0.20, 0.30], "include": "all", "exclude": None},
|
| 280 |
+
{"percentiles": [0.10, 0.20, 0.30], "include": None, "exclude": ["int"]},
|
| 281 |
+
{"percentiles": [0.10, 0.20, 0.30], "include": ["int"], "exclude": None},
|
| 282 |
+
],
|
| 283 |
+
)
|
| 284 |
+
def test_groupby_empty_dataset(dtype, kwargs):
|
| 285 |
+
# GH#41575
|
| 286 |
+
df = DataFrame([[1, 2, 3]], columns=["A", "B", "C"], dtype=dtype)
|
| 287 |
+
df["B"] = df["B"].astype(int)
|
| 288 |
+
df["C"] = df["C"].astype(float)
|
| 289 |
+
|
| 290 |
+
result = df.iloc[:0].groupby("A").describe(**kwargs)
|
| 291 |
+
expected = df.groupby("A").describe(**kwargs).reset_index(drop=True).iloc[:0]
|
| 292 |
+
tm.assert_frame_equal(result, expected)
|
| 293 |
+
|
| 294 |
+
result = df.iloc[:0].groupby("A").B.describe(**kwargs)
|
| 295 |
+
expected = df.groupby("A").B.describe(**kwargs).reset_index(drop=True).iloc[:0]
|
| 296 |
+
expected.index = Index([])
|
| 297 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_groupby_shift_diff.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
NaT,
|
| 7 |
+
Series,
|
| 8 |
+
Timedelta,
|
| 9 |
+
Timestamp,
|
| 10 |
+
date_range,
|
| 11 |
+
)
|
| 12 |
+
import pandas._testing as tm
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def test_group_shift_with_null_key():
|
| 16 |
+
# This test is designed to replicate the segfault in issue #13813.
|
| 17 |
+
n_rows = 1200
|
| 18 |
+
|
| 19 |
+
# Generate a moderately large dataframe with occasional missing
|
| 20 |
+
# values in column `B`, and then group by [`A`, `B`]. This should
|
| 21 |
+
# force `-1` in `labels` array of `g._grouper.group_info` exactly
|
| 22 |
+
# at those places, where the group-by key is partially missing.
|
| 23 |
+
df = DataFrame(
|
| 24 |
+
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
|
| 25 |
+
dtype=float,
|
| 26 |
+
columns=["A", "B", "Z"],
|
| 27 |
+
index=None,
|
| 28 |
+
)
|
| 29 |
+
g = df.groupby(["A", "B"])
|
| 30 |
+
|
| 31 |
+
expected = DataFrame(
|
| 32 |
+
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
|
| 33 |
+
dtype=float,
|
| 34 |
+
columns=["Z"],
|
| 35 |
+
index=None,
|
| 36 |
+
)
|
| 37 |
+
result = g.shift(-1)
|
| 38 |
+
|
| 39 |
+
tm.assert_frame_equal(result, expected)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def test_group_shift_with_fill_value():
|
| 43 |
+
# GH #24128
|
| 44 |
+
n_rows = 24
|
| 45 |
+
df = DataFrame(
|
| 46 |
+
[(i % 12, i % 3, i) for i in range(n_rows)],
|
| 47 |
+
dtype=float,
|
| 48 |
+
columns=["A", "B", "Z"],
|
| 49 |
+
index=None,
|
| 50 |
+
)
|
| 51 |
+
g = df.groupby(["A", "B"])
|
| 52 |
+
|
| 53 |
+
expected = DataFrame(
|
| 54 |
+
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
|
| 55 |
+
dtype=float,
|
| 56 |
+
columns=["Z"],
|
| 57 |
+
index=None,
|
| 58 |
+
)
|
| 59 |
+
result = g.shift(-1, fill_value=0)
|
| 60 |
+
|
| 61 |
+
tm.assert_frame_equal(result, expected)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def test_group_shift_lose_timezone():
|
| 65 |
+
# GH 30134
|
| 66 |
+
now_dt = Timestamp.utcnow().as_unit("ns")
|
| 67 |
+
df = DataFrame({"a": [1, 1], "date": now_dt})
|
| 68 |
+
result = df.groupby("a").shift(0).iloc[0]
|
| 69 |
+
expected = Series({"date": now_dt}, name=result.name)
|
| 70 |
+
tm.assert_series_equal(result, expected)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def test_group_diff_real_series(any_real_numpy_dtype):
|
| 74 |
+
df = DataFrame(
|
| 75 |
+
{"a": [1, 2, 3, 3, 2], "b": [1, 2, 3, 4, 5]},
|
| 76 |
+
dtype=any_real_numpy_dtype,
|
| 77 |
+
)
|
| 78 |
+
result = df.groupby("a")["b"].diff()
|
| 79 |
+
exp_dtype = "float"
|
| 80 |
+
if any_real_numpy_dtype in ["int8", "int16", "float32"]:
|
| 81 |
+
exp_dtype = "float32"
|
| 82 |
+
expected = Series([np.nan, np.nan, np.nan, 1.0, 3.0], dtype=exp_dtype, name="b")
|
| 83 |
+
tm.assert_series_equal(result, expected)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def test_group_diff_real_frame(any_real_numpy_dtype):
|
| 87 |
+
df = DataFrame(
|
| 88 |
+
{
|
| 89 |
+
"a": [1, 2, 3, 3, 2],
|
| 90 |
+
"b": [1, 2, 3, 4, 5],
|
| 91 |
+
"c": [1, 2, 3, 4, 6],
|
| 92 |
+
},
|
| 93 |
+
dtype=any_real_numpy_dtype,
|
| 94 |
+
)
|
| 95 |
+
result = df.groupby("a").diff()
|
| 96 |
+
exp_dtype = "float"
|
| 97 |
+
if any_real_numpy_dtype in ["int8", "int16", "float32"]:
|
| 98 |
+
exp_dtype = "float32"
|
| 99 |
+
expected = DataFrame(
|
| 100 |
+
{
|
| 101 |
+
"b": [np.nan, np.nan, np.nan, 1.0, 3.0],
|
| 102 |
+
"c": [np.nan, np.nan, np.nan, 1.0, 4.0],
|
| 103 |
+
},
|
| 104 |
+
dtype=exp_dtype,
|
| 105 |
+
)
|
| 106 |
+
tm.assert_frame_equal(result, expected)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@pytest.mark.parametrize(
|
| 110 |
+
"data",
|
| 111 |
+
[
|
| 112 |
+
[
|
| 113 |
+
Timestamp("2013-01-01"),
|
| 114 |
+
Timestamp("2013-01-02"),
|
| 115 |
+
Timestamp("2013-01-03"),
|
| 116 |
+
],
|
| 117 |
+
[Timedelta("5 days"), Timedelta("6 days"), Timedelta("7 days")],
|
| 118 |
+
],
|
| 119 |
+
)
|
| 120 |
+
def test_group_diff_datetimelike(data, unit):
|
| 121 |
+
df = DataFrame({"a": [1, 2, 2], "b": data})
|
| 122 |
+
df["b"] = df["b"].dt.as_unit(unit)
|
| 123 |
+
result = df.groupby("a")["b"].diff()
|
| 124 |
+
expected = Series([NaT, NaT, Timedelta("1 days")], name="b").dt.as_unit(unit)
|
| 125 |
+
tm.assert_series_equal(result, expected)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def test_group_diff_bool():
|
| 129 |
+
df = DataFrame({"a": [1, 2, 3, 3, 2], "b": [True, True, False, False, True]})
|
| 130 |
+
result = df.groupby("a")["b"].diff()
|
| 131 |
+
expected = Series([np.nan, np.nan, np.nan, False, False], name="b")
|
| 132 |
+
tm.assert_series_equal(result, expected)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def test_group_diff_object_raises(object_dtype):
|
| 136 |
+
df = DataFrame(
|
| 137 |
+
{"a": ["foo", "bar", "bar"], "b": ["baz", "foo", "foo"]}, dtype=object_dtype
|
| 138 |
+
)
|
| 139 |
+
with pytest.raises(TypeError, match=r"unsupported operand type\(s\) for -"):
|
| 140 |
+
df.groupby("a")["b"].diff()
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def test_empty_shift_with_fill():
|
| 144 |
+
# GH 41264, single-index check
|
| 145 |
+
df = DataFrame(columns=["a", "b", "c"])
|
| 146 |
+
shifted = df.groupby(["a"]).shift(1)
|
| 147 |
+
shifted_with_fill = df.groupby(["a"]).shift(1, fill_value=0)
|
| 148 |
+
tm.assert_frame_equal(shifted, shifted_with_fill)
|
| 149 |
+
tm.assert_index_equal(shifted.index, shifted_with_fill.index)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def test_multindex_empty_shift_with_fill():
|
| 153 |
+
# GH 41264, multi-index check
|
| 154 |
+
df = DataFrame(columns=["a", "b", "c"])
|
| 155 |
+
shifted = df.groupby(["a", "b"]).shift(1)
|
| 156 |
+
shifted_with_fill = df.groupby(["a", "b"]).shift(1, fill_value=0)
|
| 157 |
+
tm.assert_frame_equal(shifted, shifted_with_fill)
|
| 158 |
+
tm.assert_index_equal(shifted.index, shifted_with_fill.index)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def test_shift_periods_freq():
|
| 162 |
+
# GH 54093
|
| 163 |
+
data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}
|
| 164 |
+
df = DataFrame(data, index=date_range(start="20100101", periods=6))
|
| 165 |
+
result = df.groupby(df.index).shift(periods=-2, freq="D")
|
| 166 |
+
expected = DataFrame(data, index=date_range(start="2009-12-30", periods=6))
|
| 167 |
+
tm.assert_frame_equal(result, expected)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def test_shift_deprecate_freq_and_fill_value():
|
| 171 |
+
# GH 53832
|
| 172 |
+
data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}
|
| 173 |
+
df = DataFrame(data, index=date_range(start="20100101", periods=6))
|
| 174 |
+
msg = (
|
| 175 |
+
"Passing a 'freq' together with a 'fill_value' silently ignores the fill_value"
|
| 176 |
+
)
|
| 177 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 178 |
+
df.groupby(df.index).shift(periods=-2, freq="D", fill_value="1")
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def test_shift_disallow_suffix_if_periods_is_int():
|
| 182 |
+
# GH#44424
|
| 183 |
+
data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}
|
| 184 |
+
df = DataFrame(data)
|
| 185 |
+
msg = "Cannot specify `suffix` if `periods` is an int."
|
| 186 |
+
with pytest.raises(ValueError, match=msg):
|
| 187 |
+
df.groupby("b").shift(1, suffix="fails")
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def test_group_shift_with_multiple_periods():
|
| 191 |
+
# GH#44424
|
| 192 |
+
df = DataFrame({"a": [1, 2, 3, 3, 2], "b": [True, True, False, False, True]})
|
| 193 |
+
|
| 194 |
+
shifted_df = df.groupby("b")[["a"]].shift([0, 1])
|
| 195 |
+
expected_df = DataFrame(
|
| 196 |
+
{"a_0": [1, 2, 3, 3, 2], "a_1": [np.nan, 1.0, np.nan, 3.0, 2.0]}
|
| 197 |
+
)
|
| 198 |
+
tm.assert_frame_equal(shifted_df, expected_df)
|
| 199 |
+
|
| 200 |
+
# series
|
| 201 |
+
shifted_series = df.groupby("b")["a"].shift([0, 1])
|
| 202 |
+
tm.assert_frame_equal(shifted_series, expected_df)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def test_group_shift_with_multiple_periods_and_freq():
|
| 206 |
+
# GH#44424
|
| 207 |
+
df = DataFrame(
|
| 208 |
+
{"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]},
|
| 209 |
+
index=date_range("1/1/2000", periods=5, freq="h"),
|
| 210 |
+
)
|
| 211 |
+
shifted_df = df.groupby("b")[["a"]].shift(
|
| 212 |
+
[0, 1],
|
| 213 |
+
freq="h",
|
| 214 |
+
)
|
| 215 |
+
expected_df = DataFrame(
|
| 216 |
+
{
|
| 217 |
+
"a_0": [1.0, 2.0, 3.0, 4.0, 5.0, np.nan],
|
| 218 |
+
"a_1": [
|
| 219 |
+
np.nan,
|
| 220 |
+
1.0,
|
| 221 |
+
2.0,
|
| 222 |
+
3.0,
|
| 223 |
+
4.0,
|
| 224 |
+
5.0,
|
| 225 |
+
],
|
| 226 |
+
},
|
| 227 |
+
index=date_range("1/1/2000", periods=6, freq="h"),
|
| 228 |
+
)
|
| 229 |
+
tm.assert_frame_equal(shifted_df, expected_df)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def test_group_shift_with_multiple_periods_and_fill_value():
|
| 233 |
+
# GH#44424
|
| 234 |
+
df = DataFrame(
|
| 235 |
+
{"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]},
|
| 236 |
+
)
|
| 237 |
+
shifted_df = df.groupby("b")[["a"]].shift([0, 1], fill_value=-1)
|
| 238 |
+
expected_df = DataFrame(
|
| 239 |
+
{"a_0": [1, 2, 3, 4, 5], "a_1": [-1, 1, -1, 3, 2]},
|
| 240 |
+
)
|
| 241 |
+
tm.assert_frame_equal(shifted_df, expected_df)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def test_group_shift_with_multiple_periods_and_both_fill_and_freq_deprecated():
|
| 245 |
+
# GH#44424
|
| 246 |
+
df = DataFrame(
|
| 247 |
+
{"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]},
|
| 248 |
+
index=date_range("1/1/2000", periods=5, freq="h"),
|
| 249 |
+
)
|
| 250 |
+
msg = (
|
| 251 |
+
"Passing a 'freq' together with a 'fill_value' silently ignores the "
|
| 252 |
+
"fill_value"
|
| 253 |
+
)
|
| 254 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 255 |
+
df.groupby("b")[["a"]].shift([1, 2], fill_value=1, freq="h")
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_is_monotonic.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
Index,
|
| 7 |
+
Series,
|
| 8 |
+
)
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@pytest.mark.parametrize(
|
| 13 |
+
"in_vals, out_vals",
|
| 14 |
+
[
|
| 15 |
+
# Basics: strictly increasing (T), strictly decreasing (F),
|
| 16 |
+
# abs val increasing (F), non-strictly increasing (T)
|
| 17 |
+
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
|
| 18 |
+
# Test with inf vals
|
| 19 |
+
(
|
| 20 |
+
[1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
|
| 21 |
+
[True, False, True, False],
|
| 22 |
+
),
|
| 23 |
+
# Test with nan vals; should always be False
|
| 24 |
+
(
|
| 25 |
+
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
|
| 26 |
+
[False, False, False, False],
|
| 27 |
+
),
|
| 28 |
+
],
|
| 29 |
+
)
|
| 30 |
+
def test_is_monotonic_increasing(in_vals, out_vals):
|
| 31 |
+
# GH 17015
|
| 32 |
+
source_dict = {
|
| 33 |
+
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
|
| 34 |
+
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
|
| 35 |
+
"C": in_vals,
|
| 36 |
+
}
|
| 37 |
+
df = DataFrame(source_dict)
|
| 38 |
+
result = df.groupby("B").C.is_monotonic_increasing
|
| 39 |
+
index = Index(list("abcd"), name="B")
|
| 40 |
+
expected = Series(index=index, data=out_vals, name="C")
|
| 41 |
+
tm.assert_series_equal(result, expected)
|
| 42 |
+
|
| 43 |
+
# Also check result equal to manually taking x.is_monotonic_increasing.
|
| 44 |
+
expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
|
| 45 |
+
tm.assert_series_equal(result, expected)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@pytest.mark.parametrize(
|
| 49 |
+
"in_vals, out_vals",
|
| 50 |
+
[
|
| 51 |
+
# Basics: strictly decreasing (T), strictly increasing (F),
|
| 52 |
+
# abs val decreasing (F), non-strictly increasing (T)
|
| 53 |
+
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
|
| 54 |
+
# Test with inf vals
|
| 55 |
+
(
|
| 56 |
+
[np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
|
| 57 |
+
[True, True, False, True],
|
| 58 |
+
),
|
| 59 |
+
# Test with nan vals; should always be False
|
| 60 |
+
(
|
| 61 |
+
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
|
| 62 |
+
[False, False, False, False],
|
| 63 |
+
),
|
| 64 |
+
],
|
| 65 |
+
)
|
| 66 |
+
def test_is_monotonic_decreasing(in_vals, out_vals):
|
| 67 |
+
# GH 17015
|
| 68 |
+
source_dict = {
|
| 69 |
+
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
|
| 70 |
+
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
|
| 71 |
+
"C": in_vals,
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
df = DataFrame(source_dict)
|
| 75 |
+
result = df.groupby("B").C.is_monotonic_decreasing
|
| 76 |
+
index = Index(list("abcd"), name="B")
|
| 77 |
+
expected = Series(index=index, data=out_vals, name="C")
|
| 78 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_nlargest_nsmallest.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
MultiIndex,
|
| 6 |
+
Series,
|
| 7 |
+
date_range,
|
| 8 |
+
)
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_nlargest():
|
| 13 |
+
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
|
| 14 |
+
b = Series(list("a" * 5 + "b" * 5))
|
| 15 |
+
gb = a.groupby(b)
|
| 16 |
+
r = gb.nlargest(3)
|
| 17 |
+
e = Series(
|
| 18 |
+
[7, 5, 3, 10, 9, 6],
|
| 19 |
+
index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
|
| 20 |
+
)
|
| 21 |
+
tm.assert_series_equal(r, e)
|
| 22 |
+
|
| 23 |
+
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
|
| 24 |
+
gb = a.groupby(b)
|
| 25 |
+
e = Series(
|
| 26 |
+
[3, 2, 1, 3, 3, 2],
|
| 27 |
+
index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
|
| 28 |
+
)
|
| 29 |
+
tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def test_nlargest_mi_grouper():
|
| 33 |
+
# see gh-21411
|
| 34 |
+
npr = np.random.default_rng(2)
|
| 35 |
+
|
| 36 |
+
dts = date_range("20180101", periods=10)
|
| 37 |
+
iterables = [dts, ["one", "two"]]
|
| 38 |
+
|
| 39 |
+
idx = MultiIndex.from_product(iterables, names=["first", "second"])
|
| 40 |
+
s = Series(npr.standard_normal(20), index=idx)
|
| 41 |
+
|
| 42 |
+
result = s.groupby("first").nlargest(1)
|
| 43 |
+
|
| 44 |
+
exp_idx = MultiIndex.from_tuples(
|
| 45 |
+
[
|
| 46 |
+
(dts[0], dts[0], "one"),
|
| 47 |
+
(dts[1], dts[1], "one"),
|
| 48 |
+
(dts[2], dts[2], "one"),
|
| 49 |
+
(dts[3], dts[3], "two"),
|
| 50 |
+
(dts[4], dts[4], "one"),
|
| 51 |
+
(dts[5], dts[5], "one"),
|
| 52 |
+
(dts[6], dts[6], "one"),
|
| 53 |
+
(dts[7], dts[7], "one"),
|
| 54 |
+
(dts[8], dts[8], "one"),
|
| 55 |
+
(dts[9], dts[9], "one"),
|
| 56 |
+
],
|
| 57 |
+
names=["first", "first", "second"],
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
exp_values = [
|
| 61 |
+
0.18905338179353307,
|
| 62 |
+
-0.41306354339189344,
|
| 63 |
+
1.799707382720902,
|
| 64 |
+
0.7738065867276614,
|
| 65 |
+
0.28121066979764925,
|
| 66 |
+
0.9775674511260357,
|
| 67 |
+
-0.3288239040579627,
|
| 68 |
+
0.45495807124085547,
|
| 69 |
+
0.5452887139646817,
|
| 70 |
+
0.12682784711186987,
|
| 71 |
+
]
|
| 72 |
+
|
| 73 |
+
expected = Series(exp_values, index=exp_idx)
|
| 74 |
+
tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_nsmallest():
|
| 78 |
+
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
|
| 79 |
+
b = Series(list("a" * 5 + "b" * 5))
|
| 80 |
+
gb = a.groupby(b)
|
| 81 |
+
r = gb.nsmallest(3)
|
| 82 |
+
e = Series(
|
| 83 |
+
[1, 2, 3, 0, 4, 6],
|
| 84 |
+
index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
|
| 85 |
+
)
|
| 86 |
+
tm.assert_series_equal(r, e)
|
| 87 |
+
|
| 88 |
+
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
|
| 89 |
+
gb = a.groupby(b)
|
| 90 |
+
e = Series(
|
| 91 |
+
[0, 1, 1, 0, 1, 2],
|
| 92 |
+
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
|
| 93 |
+
)
|
| 94 |
+
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@pytest.mark.parametrize(
|
| 98 |
+
"data, groups",
|
| 99 |
+
[([0, 1, 2, 3], [0, 0, 1, 1]), ([0], [0])],
|
| 100 |
+
)
|
| 101 |
+
@pytest.mark.parametrize("dtype", [None, *tm.ALL_INT_NUMPY_DTYPES])
|
| 102 |
+
@pytest.mark.parametrize("method", ["nlargest", "nsmallest"])
|
| 103 |
+
def test_nlargest_and_smallest_noop(data, groups, dtype, method):
|
| 104 |
+
# GH 15272, GH 16345, GH 29129
|
| 105 |
+
# Test nlargest/smallest when it results in a noop,
|
| 106 |
+
# i.e. input is sorted and group size <= n
|
| 107 |
+
if dtype is not None:
|
| 108 |
+
data = np.array(data, dtype=dtype)
|
| 109 |
+
if method == "nlargest":
|
| 110 |
+
data = list(reversed(data))
|
| 111 |
+
ser = Series(data, name="a")
|
| 112 |
+
result = getattr(ser.groupby(groups), method)(n=2)
|
| 113 |
+
expidx = np.array(groups, dtype=int) if isinstance(groups, list) else groups
|
| 114 |
+
expected = Series(data, index=MultiIndex.from_arrays([expidx, ser.index]), name="a")
|
| 115 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_nth.py
ADDED
|
@@ -0,0 +1,921 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
MultiIndex,
|
| 9 |
+
Series,
|
| 10 |
+
Timestamp,
|
| 11 |
+
isna,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def test_first_last_nth(df):
    """Basic first/last/nth behavior on a single-key groupby.

    `df` is a pytest fixture from the groupby conftest — presumably the
    standard 8-row frame with key column "A" in {"foo", "bar"}; verify
    against conftest.
    """
    # tests for first / last / nth
    grouped = df.groupby("A")
    first = grouped.first()
    expected = df.loc[[1, 0], ["B", "C", "D"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(first, expected)

    # nth(0) keeps the original row index, unlike first()
    nth = grouped.nth(0)
    expected = df.loc[[0, 1]]
    tm.assert_frame_equal(nth, expected)

    last = grouped.last()
    expected = df.loc[[5, 7], ["B", "C", "D"]]
    expected.index = Index(["bar", "foo"], name="A")
    tm.assert_frame_equal(last, expected)

    nth = grouped.nth(-1)
    expected = df.iloc[[5, 7]]
    tm.assert_frame_equal(nth, expected)

    nth = grouped.nth(1)
    expected = df.iloc[[2, 3]]
    tm.assert_frame_equal(nth, expected)

    # it works! (smoke test on a selected column)
    grouped["B"].first()
    grouped["B"].last()
    grouped["B"].nth(0)

    # first/last skip NaN values; nth does not
    df = df.copy()
    df.loc[df["A"] == "foo", "B"] = np.nan
    grouped = df.groupby("A")
    assert isna(grouped["B"].first()["foo"])
    assert isna(grouped["B"].last()["foo"])
    assert isna(grouped["B"].nth(0).iloc[0])

    # v0.14.0 whatsnew
    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
    g = df.groupby("A")
    result = g.first()
    expected = df.iloc[[1, 2]].set_index("A")
    tm.assert_frame_equal(result, expected)

    # nth with dropna="any" behaves like first() but keeps the row index
    expected = df.iloc[[1, 2]]
    result = g.nth(0, dropna="any")
    tm.assert_frame_equal(result, expected)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last_with_na_object(method, nulls_fixture):
    """first/last skip every null flavor (None, NaN, NaT, pd.NA).

    `nulls_fixture` is a pandas-wide pytest fixture iterating the null
    singletons.
    """
    # https://github.com/pandas-dev/pandas/issues/32123
    groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a")
    result = getattr(groups, method)()

    if method == "first":
        values = [1, 3]
    else:
        values = [2, 3]

    # cast through the result dtype: the null may have upcast column "b"
    values = np.array(values, dtype=result["b"].dtype)
    idx = Index([1, 2], name="a")
    expected = DataFrame({"b": values}, index=idx)

    tm.assert_frame_equal(result, expected)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@pytest.mark.parametrize("index", [0, -1])
def test_nth_with_na_object(index, nulls_fixture):
    """nth does NOT skip nulls: positional selection includes the null row."""
    # https://github.com/pandas-dev/pandas/issues/32123
    df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]})
    groups = df.groupby("a")
    result = groups.nth(index)
    expected = df.iloc[[0, 2]] if index == 0 else df.iloc[[1, 3]]
    tm.assert_frame_equal(result, expected)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last_with_None(method):
    """A group whose only value is None keeps None (object dtype preserved)."""
    # https://github.com/pandas-dev/pandas/issues/32800
    # None should be preserved as object dtype
    df = DataFrame.from_dict({"id": ["a"], "value": [None]})
    groups = df.groupby("id", as_index=False)
    result = getattr(groups, method)()

    tm.assert_frame_equal(result, df)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@pytest.mark.parametrize("method", ["first", "last"])
@pytest.mark.parametrize(
    "df, expected",
    [
        # mixed None/np.nan with one real value: nulls are skipped
        (
            DataFrame({"id": "a", "value": [None, "foo", np.nan]}),
            DataFrame({"value": ["foo"]}, index=Index(["a"], name="id")),
        ),
        # all-null object column: the null is kept (nothing to skip to)
        (
            DataFrame({"id": "a", "value": [np.nan]}, dtype=object),
            DataFrame({"value": [None]}, index=Index(["a"], name="id")),
        ),
    ],
)
def test_first_last_with_None_expanded(method, df, expected):
    """first/last null handling across mixed and all-null object columns."""
    # GH 32800, 38286
    result = getattr(df.groupby("id"), method)()
    tm.assert_frame_equal(result, expected)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def test_first_last_nth_dtypes():
    """first/last/nth preserve mixed column dtypes (float, float32, bool, int)."""
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
            "C": np.random.default_rng(2).standard_normal(8),
            "D": np.array(np.random.default_rng(2).standard_normal(8), dtype="float32"),
        }
    )
    # add a bool and an int column to exercise dtype preservation
    df["E"] = True
    df["F"] = 1

    # tests for first / last / nth
    grouped = df.groupby("A")
    first = grouped.first()
    expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(first, expected)

    last = grouped.last()
    expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(last, expected)

    nth = grouped.nth(1)
    expected = df.iloc[[2, 3]]
    tm.assert_frame_equal(nth, expected)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def test_first_last_nth_dtypes2():
    """groupby.first must not upcast int64 (GH 2763: first/last shifting dtypes)."""
    # GH 2763, first/last shifting dtypes
    idx = list(range(10))
    idx.append(9)  # duplicate index label to force a real grouping
    ser = Series(data=range(11), index=idx, name="IntCol")
    assert ser.dtype == "int64"
    f = ser.groupby(level=0).first()
    assert f.dtype == "int64"
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def test_first_last_nth_nan_dtype():
    """All-null object column keeps object dtype through first/last/nth (GH 33591)."""
    # GH 33591
    df = DataFrame({"data": ["A"], "nans": Series([None], dtype=object)})
    grouped = df.groupby("data")

    expected = df.set_index("data").nans
    tm.assert_series_equal(grouped.nans.first(), expected)
    tm.assert_series_equal(grouped.nans.last(), expected)

    # nth keeps the original (positional) index
    expected = df.nans
    tm.assert_series_equal(grouped.nans.nth(-1), expected)
    tm.assert_series_equal(grouped.nans.nth(0), expected)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_first_strings_timestamps():
    """first() works when column labels are Timestamps mixed with strings (GH 11244)."""
    # GH 11244
    test = DataFrame(
        {
            Timestamp("2012-01-01 00:00:00"): ["a", "b"],
            Timestamp("2012-01-02 00:00:00"): ["c", "d"],
            "name": ["e", "e"],
            "aaaa": ["f", "g"],
        }
    )
    result = test.groupby("name").first()
    expected = DataFrame(
        [["a", "c", "f"]],
        columns=Index([Timestamp("2012-01-01"), Timestamp("2012-01-02"), "aaaa"]),
        index=Index(["e"], name="name"),
    )
    tm.assert_frame_equal(result, expected)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def test_nth():
    """nth with positive/negative/out-of-range positions and dropna on a small frame."""
    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
    gb = df.groupby("A")

    tm.assert_frame_equal(gb.nth(0), df.iloc[[0, 2]])
    tm.assert_frame_equal(gb.nth(1), df.iloc[[1]])
    # out of range -> empty frame
    tm.assert_frame_equal(gb.nth(2), df.loc[[]])
    tm.assert_frame_equal(gb.nth(-1), df.iloc[[1, 2]])
    tm.assert_frame_equal(gb.nth(-2), df.iloc[[0]])
    tm.assert_frame_equal(gb.nth(-3), df.loc[[]])
    # works on a selected Series / sub-frame too
    tm.assert_series_equal(gb.B.nth(0), df.B.iloc[[0, 2]])
    tm.assert_series_equal(gb.B.nth(1), df.B.iloc[[1]])
    tm.assert_frame_equal(gb[["B"]].nth(0), df[["B"]].iloc[[0, 2]])

    # dropna="any" skips rows with any NaN before counting positions
    tm.assert_frame_equal(gb.nth(0, dropna="any"), df.iloc[[1, 2]])
    tm.assert_frame_equal(gb.nth(-1, dropna="any"), df.iloc[[1, 2]])

    tm.assert_frame_equal(gb.nth(7, dropna="any"), df.iloc[:0])
    tm.assert_frame_equal(gb.nth(2, dropna="any"), df.iloc[:0])
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def test_nth2():
    """nth on a MultiIndex-ed frame; out-of-bounds regression from 0.13.1 (GH 6621)."""
    # out of bounds, regression from 0.13.1
    # GH 6621
    df = DataFrame(
        {
            "color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"},
            "food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"},
            "two": {
                0: 1.5456590000000001,
                1: -0.070345000000000005,
                2: -2.4004539999999999,
                3: 0.46206000000000003,
                4: 0.52350799999999997,
            },
            "one": {
                0: 0.56573799999999996,
                1: -0.9742360000000001,
                2: 1.033801,
                3: -0.78543499999999999,
                4: 0.70422799999999997,
            },
        }
    ).set_index(["color", "food"])

    # only the "red" group has a third row
    result = df.groupby(level=0, as_index=False).nth(2)
    expected = df.iloc[[-1]]
    tm.assert_frame_equal(result, expected)

    # position past every group's size -> empty
    result = df.groupby(level=0, as_index=False).nth(3)
    expected = df.loc[[]]
    tm.assert_frame_equal(result, expected)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def test_nth3():
    """first() matches apply(iloc[0]); SeriesGroupBy.nth rejects string dropna (GH 7559)."""
    # GH 7559
    # from the vbench
    df = DataFrame(np.random.default_rng(2).integers(1, 10, (100, 2)), dtype="int64")
    ser = df[1]
    gb = df[0]
    expected = ser.groupby(gb).first()
    expected2 = ser.groupby(gb).apply(lambda x: x.iloc[0])
    tm.assert_series_equal(expected2, expected, check_names=False)
    assert expected.name == 1
    assert expected2.name == 1

    # validate first against a direct positional lookup
    v = ser[gb == 1].iloc[0]
    assert expected.iloc[0] == v
    assert expected2.iloc[0] == v

    # dropna=True is only valid for DataFrame groupbys
    with pytest.raises(ValueError, match="For a DataFrame"):
        ser.groupby(gb, sort=False).nth(0, dropna=True)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def test_nth4():
    """Doc example: nth(0, dropna="all") on a selected column skips NaN rows."""
    # doc example
    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
    gb = df.groupby("A")
    result = gb.B.nth(0, dropna="all")
    expected = df.B.iloc[[1, 2]]
    tm.assert_series_equal(result, expected)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def test_nth5():
    """nth with a list argument selects the union of per-group positions."""
    frame = DataFrame(
        [[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"]
    )
    grouped = frame.groupby("A")

    # (nth argument, expected positional rows) pairs
    cases = [
        (0, [0, 3]),
        ([0], [0, 3]),
        ([0, 1], [0, 1, 3, 4]),
        ([0, -1], [0, 2, 3, 4]),
        ([0, 1, 2], [0, 1, 2, 3, 4]),
        ([0, 1, -1], [0, 1, 2, 3, 4]),
        ([2], [2]),
    ]
    for arg, rows in cases:
        tm.assert_frame_equal(grouped.nth(arg), frame.iloc[rows])
    # positions beyond every group's length select nothing
    tm.assert_frame_equal(grouped.nth([3, 4]), frame.loc[[]])
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def test_nth_bdays(unit):
    """nth with a list selects specific business days per (year, month) group.

    `unit` is the pandas datetime-resolution fixture ("s"/"ms"/"us"/"ns") —
    assumed from conftest; verify there.
    """
    business_dates = pd.date_range(
        start="4/1/2014", end="6/30/2014", freq="B", unit=unit
    )
    df = DataFrame(1, index=business_dates, columns=["a", "b"])
    # get the first, fourth and last two business days for each month
    key = [df.index.year, df.index.month]
    result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
    expected_dates = pd.to_datetime(
        [
            "2014/4/1",
            "2014/4/4",
            "2014/4/29",
            "2014/4/30",
            "2014/5/1",
            "2014/5/6",
            "2014/5/29",
            "2014/5/30",
            "2014/6/2",
            "2014/6/5",
            "2014/6/27",
            "2014/6/30",
        ]
    ).as_unit(unit)
    expected = DataFrame(1, columns=["a", "b"], index=expected_dates)
    tm.assert_frame_equal(result, expected)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def test_nth_multi_grouper(three_group):
    """nth(0) with two group keys picks the first row of each (A, B) group.

    `three_group` is a conftest fixture — presumably the standard 11-row
    A/B/C frame; verify against conftest.
    """
    # PR 9090, related to issue 8979
    # test nth on multiple groupers
    grouped = three_group.groupby(["A", "B"])
    result = grouped.nth(0)
    expected = three_group.iloc[[0, 3, 4, 7]]
    tm.assert_frame_equal(result, expected)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
@pytest.mark.parametrize(
    "data, expected_first, expected_last",
    [
        # single row: first == last == the row itself
        (
            {
                "id": ["A"],
                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                "foo": [1],
            },
            {
                "id": ["A"],
                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                "foo": [1],
            },
            {
                "id": ["A"],
                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                "foo": [1],
            },
        ),
        # mixed timezones across rows; tz of each value must survive
        (
            {
                "id": ["A", "B", "A"],
                "time": [
                    Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                    Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
                ],
                "foo": [1, 2, 3],
            },
            {
                "id": ["A", "B"],
                "time": [
                    Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                ],
                "foo": [1, 2],
            },
            {
                "id": ["A", "B"],
                "time": [
                    Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                ],
                "foo": [3, 2],
            },
        ),
    ],
)
def test_first_last_tz(data, expected_first, expected_last):
    """Timezone info is retained by first/last with as_index=False (GH 15884)."""
    # GH15884
    # Test that the timezone is retained when calling first
    # or last on groupby with as_index=False

    df = DataFrame(data)

    result = df.groupby("id", as_index=False).first()
    expected = DataFrame(expected_first)
    cols = ["id", "time", "foo"]
    tm.assert_frame_equal(result[cols], expected[cols])

    result = df.groupby("id", as_index=False)["time"].first()
    tm.assert_frame_equal(result, expected[["id", "time"]])

    result = df.groupby("id", as_index=False).last()
    expected = DataFrame(expected_last)
    cols = ["id", "time", "foo"]
    tm.assert_frame_equal(result[cols], expected[cols])

    result = df.groupby("id", as_index=False)["time"].last()
    tm.assert_frame_equal(result, expected[["id", "time"]])
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
@pytest.mark.parametrize(
    "method, ts, alpha",
    [
        ["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"],
        ["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"],
    ],
)
def test_first_last_tz_multi_column(method, ts, alpha, unit):
    """first/last preserve categorical and tz-aware dtypes side by side (GH 21603)."""
    # GH 21603
    category_string = Series(list("abc")).astype("category")
    dti = pd.date_range("20130101", periods=3, tz="US/Eastern", unit=unit)
    df = DataFrame(
        {
            "group": [1, 1, 2],
            "category_string": category_string,
            "datetimetz": dti,
        }
    )
    result = getattr(df.groupby("group"), method)()
    expected = DataFrame(
        {
            "category_string": pd.Categorical(
                [alpha, "c"], dtype=category_string.dtype
            ),
            "datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")],
        },
        index=Index([1, 2], name="group"),
    )
    # align expected resolution with the parametrized unit
    expected["datetimetz"] = expected["datetimetz"].dt.as_unit(unit)
    tm.assert_frame_equal(result, expected)
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
@pytest.mark.parametrize(
    "values",
    [
        pd.array([True, False], dtype="boolean"),
        pd.array([1, 2], dtype="Int64"),
        pd.to_datetime(["2020-01-01", "2020-02-01"]),
        pd.to_timedelta([1, 2], unit="D"),
    ],
)
@pytest.mark.parametrize("function", ["first", "last", "min", "max"])
def test_first_last_extension_array_keeps_dtype(values, function):
    """first/last/min/max keep extension-array dtypes (GH 33071, GH 32194)."""
    # https://github.com/pandas-dev/pandas/issues/33071
    # https://github.com/pandas-dev/pandas/issues/32194
    df = DataFrame({"a": [1, 2], "b": values})
    grouped = df.groupby("a")
    idx = Index([1, 2], name="a")
    expected_series = Series(values, name="b", index=idx)
    expected_frame = DataFrame({"b": values}, index=idx)

    result_series = getattr(grouped["b"], function)()
    tm.assert_series_equal(result_series, expected_series)

    # same reduction routed through .agg
    result_frame = grouped.agg({"b": function})
    tm.assert_frame_equal(result_frame, expected_frame)
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def test_nth_multi_index_as_expected():
    """nth(0) on a two-key groupby over an inline three-column frame (PR 9090)."""
    # PR 9090, related to issue 8979
    # test nth on MultiIndex
    three_group = DataFrame(
        {
            "A": [
                "foo",
                "foo",
                "foo",
                "foo",
                "bar",
                "bar",
                "bar",
                "bar",
                "foo",
                "foo",
                "foo",
            ],
            "B": [
                "one",
                "one",
                "one",
                "two",
                "one",
                "one",
                "one",
                "two",
                "two",
                "two",
                "one",
            ],
            "C": [
                "dull",
                "dull",
                "shiny",
                "dull",
                "dull",
                "shiny",
                "shiny",
                "dull",
                "shiny",
                "shiny",
                "shiny",
            ],
        }
    )
    grouped = three_group.groupby(["A", "B"])
    result = grouped.nth(0)
    expected = three_group.iloc[[0, 3, 4, 7]]
    tm.assert_frame_equal(result, expected)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
@pytest.mark.parametrize(
    "op, n, expected_rows",
    [
        # negative n means "all but the last/first |n| rows" per group
        ("head", -1, [0]),
        ("head", 0, []),
        ("head", 1, [0, 2]),
        ("head", 7, [0, 1, 2]),
        ("tail", -1, [1]),
        ("tail", 0, []),
        ("tail", 1, [1, 2]),
        ("tail", 7, [0, 1, 2]),
    ],
)
@pytest.mark.parametrize("columns", [None, [], ["A"], ["B"], ["A", "B"]])
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_head_tail(op, n, expected_rows, columns, as_index):
    """head/tail row selection is independent of as_index and column selection."""
    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
    g = df.groupby("A", as_index=as_index)
    expected = df.iloc[expected_rows]
    if columns is not None:
        g = g[columns]
        expected = expected[columns]
    result = getattr(g, op)(n)
    tm.assert_frame_equal(result, expected)
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
@pytest.mark.parametrize(
    "op, n, expected_cols",
    [
        ("head", -1, [0]),
        ("head", 0, []),
        ("head", 1, [0, 2]),
        ("head", 7, [0, 1, 2]),
        ("tail", -1, [1]),
        ("tail", 0, []),
        ("tail", 1, [1, 2]),
        ("tail", 7, [0, 1, 2]),
    ],
)
def test_groupby_head_tail_axis_1(op, n, expected_cols):
    """head/tail with (deprecated) axis=1 select columns instead of rows (GH 9772)."""
    # GH 9772
    df = DataFrame(
        [[1, 2, 3], [1, 4, 5], [2, 6, 7], [3, 8, 9]], columns=["A", "B", "C"]
    )
    # axis=1 groupby is deprecated; the warning itself is part of the contract
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        g = df.groupby([0, 0, 1], axis=1)
    expected = df.iloc[:, expected_cols]
    result = getattr(g, op)(n)
    tm.assert_frame_equal(result, expected)
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def test_group_selection_cache():
    """nth, head, and tail give consistent results regardless of call order.

    GH 12839: an internal group-selection cache used to make the result of
    one of these depend on which was called first.
    """
    # GH 12839 nth, head, and tail should return same result consistently
    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
    expected = df.iloc[[0, 2]]

    # head then nth
    g = df.groupby("A")
    result1 = g.head(n=2)
    result2 = g.nth(0)
    tm.assert_frame_equal(result1, df)
    tm.assert_frame_equal(result2, expected)

    # tail then nth
    g = df.groupby("A")
    result1 = g.tail(n=2)
    result2 = g.nth(0)
    tm.assert_frame_equal(result1, df)
    tm.assert_frame_equal(result2, expected)

    # nth then head
    g = df.groupby("A")
    result1 = g.nth(0)
    result2 = g.head(n=2)
    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, df)

    # nth then tail
    g = df.groupby("A")
    result1 = g.nth(0)
    result2 = g.tail(n=2)
    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, df)
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
def test_nth_empty():
    """nth past every group's size returns an empty frame, single and multi key (GH 16064)."""
    frame = DataFrame(index=[0], columns=["a", "b", "c"])
    empty = frame.iloc[:0]

    for keys in ("a", ["a", "b"]):
        tm.assert_frame_equal(frame.groupby(keys).nth(10), empty)
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def test_nth_column_order():
    """nth keeps the frame's original column order (GH 20760)."""
    frame = DataFrame(
        [[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]],
        columns=["A", "C", "B"],
    )
    grouped = frame.groupby("A")

    tm.assert_frame_equal(grouped.nth(0), frame.iloc[[0, 3]])
    # with dropna="any" the NaN row is skipped when counting from the end
    tm.assert_frame_equal(grouped.nth(-1, dropna="any"), frame.iloc[[1, 4]])
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
@pytest.mark.parametrize("dropna", [None, "any", "all"])
def test_nth_nan_in_grouper(dropna):
    """Rows whose group key is NaN are excluded from nth for every dropna mode (GH 26011)."""
    # GH 26011
    df = DataFrame(
        {
            "a": [np.nan, "a", np.nan, "b", np.nan],
            "b": [0, 2, 4, 6, 8],
            "c": [1, 3, 5, 7, 9],
        }
    )
    result = df.groupby("a").nth(0, dropna=dropna)
    expected = df.iloc[[1, 3]]

    tm.assert_frame_equal(result, expected)
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
@pytest.mark.parametrize("dropna", [None, "any", "all"])
def test_nth_nan_in_grouper_series(dropna):
    """Same as test_nth_nan_in_grouper but via a selected Series (GH 26454)."""
    # GH 26454
    df = DataFrame(
        {
            "a": [np.nan, "a", np.nan, "b", np.nan],
            "b": [0, 2, 4, 6, 8],
        }
    )
    result = df.groupby("a")["b"].nth(0, dropna=dropna)
    expected = df["b"].iloc[[1, 3]]

    tm.assert_series_equal(result, expected)
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
def test_first_categorical_and_datetime_data_nat():
    """first() handles all-NaT datetimes next to a categorical column (GH 20520)."""
    # GH 20520
    df = DataFrame(
        {
            "group": ["first", "first", "second", "third", "third"],
            "time": 5 * [np.datetime64("NaT")],
            "categories": Series(["a", "b", "c", "a", "b"], dtype="category"),
        }
    )
    result = df.groupby("group").first()
    expected = DataFrame(
        {
            "time": 3 * [np.datetime64("NaT")],
            # categorical dtype (including unused categories) must survive
            "categories": Series(["a", "c", "a"]).astype(
                pd.CategoricalDtype(["a", "b", "c"])
            ),
        }
    )
    expected.index = Index(["first", "second", "third"], name="group")
    tm.assert_frame_equal(result, expected)
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
def test_first_multi_key_groupby_categorical():
    """first() with two group keys keeps a categorical column's dtype (GH 22512)."""
    # GH 22512
    df = DataFrame(
        {
            "A": [1, 1, 1, 2, 2],
            "B": [100, 100, 200, 100, 100],
            "C": ["apple", "orange", "mango", "mango", "orange"],
            "D": ["jupiter", "mercury", "mars", "venus", "venus"],
        }
    )
    df = df.astype({"D": "category"})
    result = df.groupby(by=["A", "B"]).first()
    expected = DataFrame(
        {
            "C": ["apple", "mango", "mango"],
            "D": Series(["jupiter", "mars", "venus"]).astype(
                pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"])
            ),
        }
    )
    expected.index = MultiIndex.from_tuples(
        [(1, 100), (1, 200), (2, 100)], names=["A", "B"]
    )
    tm.assert_frame_equal(result, expected)
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
@pytest.mark.parametrize("method", ["first", "last", "nth"])
def test_groupby_last_first_nth_with_none(method, nulls_fixture):
    """first/last/nth(3) all land on the single non-null value (GH 29645)."""
    # GH29645
    expected = Series(["y"])
    data = Series(
        [nulls_fixture, nulls_fixture, nulls_fixture, "y", nulls_fixture],
        index=[0, 0, 0, 0, 0],
    ).groupby(level=0)

    if method == "nth":
        # position 3 is exactly the non-null element
        result = getattr(data, method)(3)
    else:
        result = getattr(data, method)()

    tm.assert_series_equal(result, expected)
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
@pytest.mark.parametrize(
    "arg, expected_rows",
    [
        [slice(None, 3, 2), [0, 1, 4, 5]],
        [slice(None, -2), [0, 2, 5]],
        [[slice(None, 2), slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]],
        [[0, 1, slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]],
    ],
)
def test_slice(slice_test_df, slice_test_grouped, arg, expected_rows):
    """nth[...] indexing and nth(...) accept slices and mixed int/slice lists.

    `slice_test_df` / `slice_test_grouped` are conftest fixtures — assumed
    to be a small frame and its groupby; verify against conftest.
    """
    # Test slices GH #42947

    result = slice_test_grouped.nth[arg]
    equivalent = slice_test_grouped.nth(arg)
    expected = slice_test_df.iloc[expected_rows]

    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(equivalent, expected)
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
def test_nth_indexed(slice_test_df, slice_test_grouped):
    """nth[0, 1, -2:] index notation equals nth([0, 1, slice(-2, None)]) (GH 44688)."""
    # Test index notation GH #44688

    result = slice_test_grouped.nth[0, 1, -2:]
    equivalent = slice_test_grouped.nth([0, 1, slice(-2, None)])
    expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]

    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(equivalent, expected)
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
def test_invalid_argument(slice_test_grouped):
    """nth rejects non-integer, non-slice arguments with TypeError."""
    # Test for error on invalid argument

    with pytest.raises(TypeError, match="Invalid index"):
        slice_test_grouped.nth(3.14)
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def test_negative_step(slice_test_grouped):
    """nth rejects slices with a negative step."""
    # Test for error on negative slice step

    with pytest.raises(ValueError, match="Invalid step"):
        slice_test_grouped.nth(slice(None, None, -1))
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
def test_np_ints(slice_test_df, slice_test_grouped):
    """nth accepts a numpy integer array as the position list."""
    # Test np ints work

    result = slice_test_grouped.nth(np.array([0, 1]))
    expected = slice_test_df.iloc[[0, 1, 2, 3, 4]]
    tm.assert_frame_equal(result, expected)
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def test_groupby_nth_with_column_axis():
    """nth with (deprecated) axis=1 selects columns per group (GH 43926)."""
    # GH43926
    df = DataFrame(
        [
            [4, 5, 6],
            [8, 8, 7],
        ],
        index=["z", "y"],
        columns=["C", "B", "A"],
    )
    # grouping columns by the values in row "y": C and B share key 8
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby(df.iloc[1], axis=1)
    result = gb.nth(0)
    expected = df.iloc[:, [0, 2]]
    tm.assert_frame_equal(result, expected)
|
| 793 |
+
|
| 794 |
+
|
| 795 |
+
def test_groupby_nth_interval():
    """nth on a MultiIndex of categorical IntervalIndex levels (GH 24205).

    The -1 code in the second level marks a missing (NaN) category; that
    row must not be selected as a group's first element.
    """
    # GH#24205
    idx_result = MultiIndex(
        [
            pd.CategoricalIndex([pd.Interval(0, 1), pd.Interval(1, 2)]),
            pd.CategoricalIndex([pd.Interval(0, 10), pd.Interval(10, 20)]),
        ],
        [[0, 0, 0, 1, 1], [0, 1, 1, 0, -1]],
    )
    df_result = DataFrame({"col": range(len(idx_result))}, index=idx_result)
    result = df_result.groupby(level=[0, 1], observed=False).nth(0)
    val_expected = [0, 1, 3]
    idx_expected = MultiIndex(
        [
            pd.CategoricalIndex([pd.Interval(0, 1), pd.Interval(1, 2)]),
            pd.CategoricalIndex([pd.Interval(0, 10), pd.Interval(10, 20)]),
        ],
        [[0, 0, 1], [0, 1, 0]],
    )
    expected = DataFrame(val_expected, index=idx_expected, columns=["col"])
    tm.assert_frame_equal(result, expected)
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
@pytest.mark.parametrize(
    "start, stop, expected_values, expected_columns",
    [
        (None, None, [0, 1, 2, 3, 4], list("ABCDE")),
        (None, 1, [0, 3], list("AD")),
        (None, 9, [0, 1, 2, 3, 4], list("ABCDE")),
        (None, -1, [0, 1, 3], list("ABD")),
        (1, None, [1, 2, 4], list("BCE")),
        (1, -1, [1], list("B")),
        (-1, None, [2, 4], list("CE")),
        (-1, 2, [4], list("E")),
    ],
)
@pytest.mark.parametrize("method", ["call", "index"])
def test_nth_slices_with_column_axis(
    start, stop, expected_values, expected_columns, method
):
    """Slice arguments to nth, via both call and [] syntax, with axis=1 grouping."""
    df = DataFrame([range(5)], columns=[list("ABCDE")])
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby([5, 5, 5, 6, 6], axis=1)
    # dispatch on the parametrized invocation style
    result = {
        "call": lambda start, stop: gb.nth(slice(start, stop)),
        "index": lambda start, stop: gb.nth[start:stop],
    }[method](start, stop)
    expected = DataFrame([expected_values], columns=[expected_columns])
    tm.assert_frame_equal(result, expected)
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
@pytest.mark.filterwarnings(
    "ignore:invalid value encountered in remainder:RuntimeWarning"
)
def test_head_tail_dropna_true():
    """With default dropna=True, NaN-keyed groups are excluded from head/tail/nth (GH 45089)."""
    # GH#45089
    df = DataFrame(
        [["a", "z"], ["b", np.nan], ["c", np.nan], ["c", np.nan]], columns=["X", "Y"]
    )
    # only the fully non-null key ("a", "z") survives
    expected = DataFrame([["a", "z"]], columns=["X", "Y"])

    result = df.groupby(["X", "Y"]).head(n=1)
    tm.assert_frame_equal(result, expected)

    result = df.groupby(["X", "Y"]).tail(n=1)
    tm.assert_frame_equal(result, expected)

    result = df.groupby(["X", "Y"]).nth(n=0)
    tm.assert_frame_equal(result, expected)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
def test_head_tail_dropna_false():
|
| 868 |
+
# GH#45089
|
| 869 |
+
df = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"])
|
| 870 |
+
expected = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"])
|
| 871 |
+
|
| 872 |
+
result = df.groupby(["X", "Y"], dropna=False).head(n=1)
|
| 873 |
+
tm.assert_frame_equal(result, expected)
|
| 874 |
+
|
| 875 |
+
result = df.groupby(["X", "Y"], dropna=False).tail(n=1)
|
| 876 |
+
tm.assert_frame_equal(result, expected)
|
| 877 |
+
|
| 878 |
+
result = df.groupby(["X", "Y"], dropna=False).nth(n=0)
|
| 879 |
+
tm.assert_frame_equal(result, expected)
|
| 880 |
+
|
| 881 |
+
|
| 882 |
+
@pytest.mark.parametrize("selection", ("b", ["b"], ["b", "c"]))
|
| 883 |
+
@pytest.mark.parametrize("dropna", ["any", "all", None])
|
| 884 |
+
def test_nth_after_selection(selection, dropna):
|
| 885 |
+
# GH#11038, GH#53518
|
| 886 |
+
df = DataFrame(
|
| 887 |
+
{
|
| 888 |
+
"a": [1, 1, 2],
|
| 889 |
+
"b": [np.nan, 3, 4],
|
| 890 |
+
"c": [5, 6, 7],
|
| 891 |
+
}
|
| 892 |
+
)
|
| 893 |
+
gb = df.groupby("a")[selection]
|
| 894 |
+
result = gb.nth(0, dropna=dropna)
|
| 895 |
+
if dropna == "any" or (dropna == "all" and selection != ["b", "c"]):
|
| 896 |
+
locs = [1, 2]
|
| 897 |
+
else:
|
| 898 |
+
locs = [0, 2]
|
| 899 |
+
expected = df.loc[locs, selection]
|
| 900 |
+
tm.assert_equal(result, expected)
|
| 901 |
+
|
| 902 |
+
|
| 903 |
+
@pytest.mark.parametrize(
|
| 904 |
+
"data",
|
| 905 |
+
[
|
| 906 |
+
(
|
| 907 |
+
Timestamp("2011-01-15 12:50:28.502376"),
|
| 908 |
+
Timestamp("2011-01-20 12:50:28.593448"),
|
| 909 |
+
),
|
| 910 |
+
(24650000000000001, 24650000000000002),
|
| 911 |
+
],
|
| 912 |
+
)
|
| 913 |
+
def test_groupby_nth_int_like_precision(data):
|
| 914 |
+
# GH#6620, GH#9311
|
| 915 |
+
df = DataFrame({"a": [1, 1], "b": data})
|
| 916 |
+
|
| 917 |
+
grouped = df.groupby("a")
|
| 918 |
+
result = grouped.nth(0)
|
| 919 |
+
expected = DataFrame({"a": 1, "b": [data[0]]})
|
| 920 |
+
|
| 921 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_quantile.py
ADDED
|
@@ -0,0 +1,496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
)
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@pytest.mark.parametrize(
|
| 13 |
+
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
|
| 14 |
+
)
|
| 15 |
+
@pytest.mark.parametrize(
|
| 16 |
+
"a_vals,b_vals",
|
| 17 |
+
[
|
| 18 |
+
# Ints
|
| 19 |
+
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]),
|
| 20 |
+
([1, 2, 3, 4], [4, 3, 2, 1]),
|
| 21 |
+
([1, 2, 3, 4, 5], [4, 3, 2, 1]),
|
| 22 |
+
# Floats
|
| 23 |
+
([1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]),
|
| 24 |
+
# Missing data
|
| 25 |
+
([1.0, np.nan, 3.0, np.nan, 5.0], [5.0, np.nan, 3.0, np.nan, 1.0]),
|
| 26 |
+
([np.nan, 4.0, np.nan, 2.0, np.nan], [np.nan, 4.0, np.nan, 2.0, np.nan]),
|
| 27 |
+
# Timestamps
|
| 28 |
+
(
|
| 29 |
+
pd.date_range("1/1/18", freq="D", periods=5),
|
| 30 |
+
pd.date_range("1/1/18", freq="D", periods=5)[::-1],
|
| 31 |
+
),
|
| 32 |
+
(
|
| 33 |
+
pd.date_range("1/1/18", freq="D", periods=5).as_unit("s"),
|
| 34 |
+
pd.date_range("1/1/18", freq="D", periods=5)[::-1].as_unit("s"),
|
| 35 |
+
),
|
| 36 |
+
# All NA
|
| 37 |
+
([np.nan] * 5, [np.nan] * 5),
|
| 38 |
+
],
|
| 39 |
+
)
|
| 40 |
+
@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
|
| 41 |
+
def test_quantile(interpolation, a_vals, b_vals, q, request):
|
| 42 |
+
if (
|
| 43 |
+
interpolation == "nearest"
|
| 44 |
+
and q == 0.5
|
| 45 |
+
and isinstance(b_vals, list)
|
| 46 |
+
and b_vals == [4, 3, 2, 1]
|
| 47 |
+
):
|
| 48 |
+
request.applymarker(
|
| 49 |
+
pytest.mark.xfail(
|
| 50 |
+
reason="Unclear numpy expectation for nearest "
|
| 51 |
+
"result with equidistant data"
|
| 52 |
+
)
|
| 53 |
+
)
|
| 54 |
+
all_vals = pd.concat([pd.Series(a_vals), pd.Series(b_vals)])
|
| 55 |
+
|
| 56 |
+
a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
|
| 57 |
+
b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)
|
| 58 |
+
|
| 59 |
+
df = DataFrame({"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": all_vals})
|
| 60 |
+
|
| 61 |
+
expected = DataFrame(
|
| 62 |
+
[a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key")
|
| 63 |
+
)
|
| 64 |
+
if all_vals.dtype.kind == "M" and expected.dtypes.values[0].kind == "M":
|
| 65 |
+
# TODO(non-nano): this should be unnecessary once array_to_datetime
|
| 66 |
+
# correctly infers non-nano from Timestamp.unit
|
| 67 |
+
expected = expected.astype(all_vals.dtype)
|
| 68 |
+
result = df.groupby("key").quantile(q, interpolation=interpolation)
|
| 69 |
+
|
| 70 |
+
tm.assert_frame_equal(result, expected)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def test_quantile_array():
|
| 74 |
+
# https://github.com/pandas-dev/pandas/issues/27526
|
| 75 |
+
df = DataFrame({"A": [0, 1, 2, 3, 4]})
|
| 76 |
+
key = np.array([0, 0, 1, 1, 1], dtype=np.int64)
|
| 77 |
+
result = df.groupby(key).quantile([0.25])
|
| 78 |
+
|
| 79 |
+
index = pd.MultiIndex.from_product([[0, 1], [0.25]])
|
| 80 |
+
expected = DataFrame({"A": [0.25, 2.50]}, index=index)
|
| 81 |
+
tm.assert_frame_equal(result, expected)
|
| 82 |
+
|
| 83 |
+
df = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})
|
| 84 |
+
index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]])
|
| 85 |
+
|
| 86 |
+
key = np.array([0, 0, 1, 1], dtype=np.int64)
|
| 87 |
+
result = df.groupby(key).quantile([0.25, 0.75])
|
| 88 |
+
expected = DataFrame(
|
| 89 |
+
{"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index
|
| 90 |
+
)
|
| 91 |
+
tm.assert_frame_equal(result, expected)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def test_quantile_array2():
|
| 95 |
+
# https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
|
| 96 |
+
arr = np.random.default_rng(2).integers(0, 5, size=(10, 3), dtype=np.int64)
|
| 97 |
+
df = DataFrame(arr, columns=list("ABC"))
|
| 98 |
+
result = df.groupby("A").quantile([0.3, 0.7])
|
| 99 |
+
expected = DataFrame(
|
| 100 |
+
{
|
| 101 |
+
"B": [2.0, 2.0, 2.3, 2.7, 0.3, 0.7, 3.2, 4.0, 0.3, 0.7],
|
| 102 |
+
"C": [1.0, 1.0, 1.9, 3.0999999999999996, 0.3, 0.7, 2.6, 3.0, 1.2, 2.8],
|
| 103 |
+
},
|
| 104 |
+
index=pd.MultiIndex.from_product(
|
| 105 |
+
[[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None]
|
| 106 |
+
),
|
| 107 |
+
)
|
| 108 |
+
tm.assert_frame_equal(result, expected)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_quantile_array_no_sort():
|
| 112 |
+
df = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]})
|
| 113 |
+
key = np.array([1, 0, 1], dtype=np.int64)
|
| 114 |
+
result = df.groupby(key, sort=False).quantile([0.25, 0.5, 0.75])
|
| 115 |
+
expected = DataFrame(
|
| 116 |
+
{"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]},
|
| 117 |
+
index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]),
|
| 118 |
+
)
|
| 119 |
+
tm.assert_frame_equal(result, expected)
|
| 120 |
+
|
| 121 |
+
result = df.groupby(key, sort=False).quantile([0.75, 0.25])
|
| 122 |
+
expected = DataFrame(
|
| 123 |
+
{"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]},
|
| 124 |
+
index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]),
|
| 125 |
+
)
|
| 126 |
+
tm.assert_frame_equal(result, expected)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_quantile_array_multiple_levels():
|
| 130 |
+
df = DataFrame(
|
| 131 |
+
{"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]}
|
| 132 |
+
)
|
| 133 |
+
result = df.groupby(["c", "d"]).quantile([0.25, 0.75])
|
| 134 |
+
index = pd.MultiIndex.from_tuples(
|
| 135 |
+
[("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)],
|
| 136 |
+
names=["c", "d", None],
|
| 137 |
+
)
|
| 138 |
+
expected = DataFrame(
|
| 139 |
+
{"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index
|
| 140 |
+
)
|
| 141 |
+
tm.assert_frame_equal(result, expected)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@pytest.mark.parametrize("frame_size", [(2, 3), (100, 10)])
|
| 145 |
+
@pytest.mark.parametrize("groupby", [[0], [0, 1]])
|
| 146 |
+
@pytest.mark.parametrize("q", [[0.5, 0.6]])
|
| 147 |
+
def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q):
|
| 148 |
+
# GH30289
|
| 149 |
+
nrow, ncol = frame_size
|
| 150 |
+
df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol))
|
| 151 |
+
|
| 152 |
+
idx_levels = [np.arange(min(nrow, 4))] * len(groupby) + [q]
|
| 153 |
+
idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [
|
| 154 |
+
list(range(len(q))) * min(nrow, 4)
|
| 155 |
+
]
|
| 156 |
+
expected_index = pd.MultiIndex(
|
| 157 |
+
levels=idx_levels, codes=idx_codes, names=groupby + [None]
|
| 158 |
+
)
|
| 159 |
+
expected_values = [
|
| 160 |
+
[float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q
|
| 161 |
+
]
|
| 162 |
+
expected_columns = [x for x in range(ncol) if x not in groupby]
|
| 163 |
+
expected = DataFrame(
|
| 164 |
+
expected_values, index=expected_index, columns=expected_columns
|
| 165 |
+
)
|
| 166 |
+
result = df.groupby(groupby).quantile(q)
|
| 167 |
+
|
| 168 |
+
tm.assert_frame_equal(result, expected)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def test_quantile_raises():
|
| 172 |
+
df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"])
|
| 173 |
+
|
| 174 |
+
with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"):
|
| 175 |
+
df.groupby("key").quantile()
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def test_quantile_out_of_bounds_q_raises():
|
| 179 |
+
# https://github.com/pandas-dev/pandas/issues/27470
|
| 180 |
+
df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
|
| 181 |
+
g = df.groupby([0, 0, 0, 1, 1, 1])
|
| 182 |
+
with pytest.raises(ValueError, match="Got '50.0' instead"):
|
| 183 |
+
g.quantile(50)
|
| 184 |
+
|
| 185 |
+
with pytest.raises(ValueError, match="Got '-1.0' instead"):
|
| 186 |
+
g.quantile(-1)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def test_quantile_missing_group_values_no_segfaults():
|
| 190 |
+
# GH 28662
|
| 191 |
+
data = np.array([1.0, np.nan, 1.0])
|
| 192 |
+
df = DataFrame({"key": data, "val": range(3)})
|
| 193 |
+
|
| 194 |
+
# Random segfaults; would have been guaranteed in loop
|
| 195 |
+
grp = df.groupby("key")
|
| 196 |
+
for _ in range(100):
|
| 197 |
+
grp.quantile()
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@pytest.mark.parametrize(
|
| 201 |
+
"key, val, expected_key, expected_val",
|
| 202 |
+
[
|
| 203 |
+
([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]),
|
| 204 |
+
([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]),
|
| 205 |
+
(["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]),
|
| 206 |
+
([0], [42], [0], [42.0]),
|
| 207 |
+
([], [], np.array([], dtype="float64"), np.array([], dtype="float64")),
|
| 208 |
+
],
|
| 209 |
+
)
|
| 210 |
+
def test_quantile_missing_group_values_correct_results(
|
| 211 |
+
key, val, expected_key, expected_val
|
| 212 |
+
):
|
| 213 |
+
# GH 28662, GH 33200, GH 33569
|
| 214 |
+
df = DataFrame({"key": key, "val": val})
|
| 215 |
+
|
| 216 |
+
expected = DataFrame(
|
| 217 |
+
expected_val, index=Index(expected_key, name="key"), columns=["val"]
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
grp = df.groupby("key")
|
| 221 |
+
|
| 222 |
+
result = grp.quantile(0.5)
|
| 223 |
+
tm.assert_frame_equal(result, expected)
|
| 224 |
+
|
| 225 |
+
result = grp.quantile()
|
| 226 |
+
tm.assert_frame_equal(result, expected)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@pytest.mark.parametrize(
|
| 230 |
+
"values",
|
| 231 |
+
[
|
| 232 |
+
pd.array([1, 0, None] * 2, dtype="Int64"),
|
| 233 |
+
pd.array([True, False, None] * 2, dtype="boolean"),
|
| 234 |
+
],
|
| 235 |
+
)
|
| 236 |
+
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
|
| 237 |
+
def test_groupby_quantile_nullable_array(values, q):
|
| 238 |
+
# https://github.com/pandas-dev/pandas/issues/33136
|
| 239 |
+
df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values})
|
| 240 |
+
result = df.groupby("a")["b"].quantile(q)
|
| 241 |
+
|
| 242 |
+
if isinstance(q, list):
|
| 243 |
+
idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
|
| 244 |
+
true_quantiles = [0.0, 0.5, 1.0]
|
| 245 |
+
else:
|
| 246 |
+
idx = Index(["x", "y"], name="a")
|
| 247 |
+
true_quantiles = [0.5]
|
| 248 |
+
|
| 249 |
+
expected = pd.Series(true_quantiles * 2, index=idx, name="b", dtype="Float64")
|
| 250 |
+
tm.assert_series_equal(result, expected)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
|
| 254 |
+
@pytest.mark.parametrize("numeric_only", [True, False])
|
| 255 |
+
def test_groupby_quantile_raises_on_invalid_dtype(q, numeric_only):
|
| 256 |
+
df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
|
| 257 |
+
if numeric_only:
|
| 258 |
+
result = df.groupby("a").quantile(q, numeric_only=numeric_only)
|
| 259 |
+
expected = df.groupby("a")[["b"]].quantile(q)
|
| 260 |
+
tm.assert_frame_equal(result, expected)
|
| 261 |
+
else:
|
| 262 |
+
with pytest.raises(
|
| 263 |
+
TypeError, match="'quantile' cannot be performed against 'object' dtypes!"
|
| 264 |
+
):
|
| 265 |
+
df.groupby("a").quantile(q, numeric_only=numeric_only)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def test_groupby_quantile_NA_float(any_float_dtype):
|
| 269 |
+
# GH#42849
|
| 270 |
+
df = DataFrame({"x": [1, 1], "y": [0.2, np.nan]}, dtype=any_float_dtype)
|
| 271 |
+
result = df.groupby("x")["y"].quantile(0.5)
|
| 272 |
+
exp_index = Index([1.0], dtype=any_float_dtype, name="x")
|
| 273 |
+
|
| 274 |
+
if any_float_dtype in ["Float32", "Float64"]:
|
| 275 |
+
expected_dtype = any_float_dtype
|
| 276 |
+
else:
|
| 277 |
+
expected_dtype = None
|
| 278 |
+
|
| 279 |
+
expected = pd.Series([0.2], dtype=expected_dtype, index=exp_index, name="y")
|
| 280 |
+
tm.assert_series_equal(result, expected)
|
| 281 |
+
|
| 282 |
+
result = df.groupby("x")["y"].quantile([0.5, 0.75])
|
| 283 |
+
expected = pd.Series(
|
| 284 |
+
[0.2] * 2,
|
| 285 |
+
index=pd.MultiIndex.from_product((exp_index, [0.5, 0.75]), names=["x", None]),
|
| 286 |
+
name="y",
|
| 287 |
+
dtype=expected_dtype,
|
| 288 |
+
)
|
| 289 |
+
tm.assert_series_equal(result, expected)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def test_groupby_quantile_NA_int(any_int_ea_dtype):
|
| 293 |
+
# GH#42849
|
| 294 |
+
df = DataFrame({"x": [1, 1], "y": [2, 5]}, dtype=any_int_ea_dtype)
|
| 295 |
+
result = df.groupby("x")["y"].quantile(0.5)
|
| 296 |
+
expected = pd.Series(
|
| 297 |
+
[3.5],
|
| 298 |
+
dtype="Float64",
|
| 299 |
+
index=Index([1], name="x", dtype=any_int_ea_dtype),
|
| 300 |
+
name="y",
|
| 301 |
+
)
|
| 302 |
+
tm.assert_series_equal(expected, result)
|
| 303 |
+
|
| 304 |
+
result = df.groupby("x").quantile(0.5)
|
| 305 |
+
expected = DataFrame(
|
| 306 |
+
{"y": 3.5}, dtype="Float64", index=Index([1], name="x", dtype=any_int_ea_dtype)
|
| 307 |
+
)
|
| 308 |
+
tm.assert_frame_equal(result, expected)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
@pytest.mark.parametrize(
|
| 312 |
+
"interpolation, val1, val2", [("lower", 2, 2), ("higher", 2, 3), ("nearest", 2, 2)]
|
| 313 |
+
)
|
| 314 |
+
def test_groupby_quantile_all_na_group_masked(
|
| 315 |
+
interpolation, val1, val2, any_numeric_ea_dtype
|
| 316 |
+
):
|
| 317 |
+
# GH#37493
|
| 318 |
+
df = DataFrame(
|
| 319 |
+
{"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype
|
| 320 |
+
)
|
| 321 |
+
result = df.groupby("a").quantile(q=[0.5, 0.7], interpolation=interpolation)
|
| 322 |
+
expected = DataFrame(
|
| 323 |
+
{"b": [val1, val2, pd.NA, pd.NA]},
|
| 324 |
+
dtype=any_numeric_ea_dtype,
|
| 325 |
+
index=pd.MultiIndex.from_arrays(
|
| 326 |
+
[pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype), [0.5, 0.7, 0.5, 0.7]],
|
| 327 |
+
names=["a", None],
|
| 328 |
+
),
|
| 329 |
+
)
|
| 330 |
+
tm.assert_frame_equal(result, expected)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
@pytest.mark.parametrize("interpolation", ["midpoint", "linear"])
|
| 334 |
+
def test_groupby_quantile_all_na_group_masked_interp(
|
| 335 |
+
interpolation, any_numeric_ea_dtype
|
| 336 |
+
):
|
| 337 |
+
# GH#37493
|
| 338 |
+
df = DataFrame(
|
| 339 |
+
{"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype
|
| 340 |
+
)
|
| 341 |
+
result = df.groupby("a").quantile(q=[0.5, 0.75], interpolation=interpolation)
|
| 342 |
+
|
| 343 |
+
if any_numeric_ea_dtype == "Float32":
|
| 344 |
+
expected_dtype = any_numeric_ea_dtype
|
| 345 |
+
else:
|
| 346 |
+
expected_dtype = "Float64"
|
| 347 |
+
|
| 348 |
+
expected = DataFrame(
|
| 349 |
+
{"b": [2.0, 2.5, pd.NA, pd.NA]},
|
| 350 |
+
dtype=expected_dtype,
|
| 351 |
+
index=pd.MultiIndex.from_arrays(
|
| 352 |
+
[
|
| 353 |
+
pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype),
|
| 354 |
+
[0.5, 0.75, 0.5, 0.75],
|
| 355 |
+
],
|
| 356 |
+
names=["a", None],
|
| 357 |
+
),
|
| 358 |
+
)
|
| 359 |
+
tm.assert_frame_equal(result, expected)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
@pytest.mark.parametrize("dtype", ["Float64", "Float32"])
|
| 363 |
+
def test_groupby_quantile_allNA_column(dtype):
|
| 364 |
+
# GH#42849
|
| 365 |
+
df = DataFrame({"x": [1, 1], "y": [pd.NA] * 2}, dtype=dtype)
|
| 366 |
+
result = df.groupby("x")["y"].quantile(0.5)
|
| 367 |
+
expected = pd.Series(
|
| 368 |
+
[np.nan], dtype=dtype, index=Index([1.0], dtype=dtype), name="y"
|
| 369 |
+
)
|
| 370 |
+
expected.index.name = "x"
|
| 371 |
+
tm.assert_series_equal(expected, result)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def test_groupby_timedelta_quantile():
|
| 375 |
+
# GH: 29485
|
| 376 |
+
df = DataFrame(
|
| 377 |
+
{"value": pd.to_timedelta(np.arange(4), unit="s"), "group": [1, 1, 2, 2]}
|
| 378 |
+
)
|
| 379 |
+
result = df.groupby("group").quantile(0.99)
|
| 380 |
+
expected = DataFrame(
|
| 381 |
+
{
|
| 382 |
+
"value": [
|
| 383 |
+
pd.Timedelta("0 days 00:00:00.990000"),
|
| 384 |
+
pd.Timedelta("0 days 00:00:02.990000"),
|
| 385 |
+
]
|
| 386 |
+
},
|
| 387 |
+
index=Index([1, 2], name="group"),
|
| 388 |
+
)
|
| 389 |
+
tm.assert_frame_equal(result, expected)
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def test_columns_groupby_quantile():
|
| 393 |
+
# GH 33795
|
| 394 |
+
df = DataFrame(
|
| 395 |
+
np.arange(12).reshape(3, -1),
|
| 396 |
+
index=list("XYZ"),
|
| 397 |
+
columns=pd.Series(list("ABAB"), name="col"),
|
| 398 |
+
)
|
| 399 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 400 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 401 |
+
gb = df.groupby("col", axis=1)
|
| 402 |
+
result = gb.quantile(q=[0.8, 0.2])
|
| 403 |
+
expected = DataFrame(
|
| 404 |
+
[
|
| 405 |
+
[1.6, 0.4, 2.6, 1.4],
|
| 406 |
+
[5.6, 4.4, 6.6, 5.4],
|
| 407 |
+
[9.6, 8.4, 10.6, 9.4],
|
| 408 |
+
],
|
| 409 |
+
index=list("XYZ"),
|
| 410 |
+
columns=pd.MultiIndex.from_tuples(
|
| 411 |
+
[("A", 0.8), ("A", 0.2), ("B", 0.8), ("B", 0.2)], names=["col", None]
|
| 412 |
+
),
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
tm.assert_frame_equal(result, expected)
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def test_timestamp_groupby_quantile(unit):
|
| 419 |
+
# GH 33168
|
| 420 |
+
dti = pd.date_range(
|
| 421 |
+
start="2020-04-19 00:00:00", freq="1min", periods=100, tz="UTC", unit=unit
|
| 422 |
+
).floor("1h")
|
| 423 |
+
df = DataFrame(
|
| 424 |
+
{
|
| 425 |
+
"timestamp": dti,
|
| 426 |
+
"category": list(range(1, 101)),
|
| 427 |
+
"value": list(range(101, 201)),
|
| 428 |
+
}
|
| 429 |
+
)
|
| 430 |
+
|
| 431 |
+
result = df.groupby("timestamp").quantile([0.2, 0.8])
|
| 432 |
+
|
| 433 |
+
mi = pd.MultiIndex.from_product([dti[::99], [0.2, 0.8]], names=("timestamp", None))
|
| 434 |
+
expected = DataFrame(
|
| 435 |
+
[
|
| 436 |
+
{"category": 12.8, "value": 112.8},
|
| 437 |
+
{"category": 48.2, "value": 148.2},
|
| 438 |
+
{"category": 68.8, "value": 168.8},
|
| 439 |
+
{"category": 92.2, "value": 192.2},
|
| 440 |
+
],
|
| 441 |
+
index=mi,
|
| 442 |
+
)
|
| 443 |
+
|
| 444 |
+
tm.assert_frame_equal(result, expected)
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def test_groupby_quantile_dt64tz_period():
|
| 448 |
+
# GH#51373
|
| 449 |
+
dti = pd.date_range("2016-01-01", periods=1000)
|
| 450 |
+
df = pd.Series(dti).to_frame().copy()
|
| 451 |
+
df[1] = dti.tz_localize("US/Pacific")
|
| 452 |
+
df[2] = dti.to_period("D")
|
| 453 |
+
df[3] = dti - dti[0]
|
| 454 |
+
df.iloc[-1] = pd.NaT
|
| 455 |
+
|
| 456 |
+
by = np.tile(np.arange(5), 200)
|
| 457 |
+
gb = df.groupby(by)
|
| 458 |
+
|
| 459 |
+
result = gb.quantile(0.5)
|
| 460 |
+
|
| 461 |
+
# Check that we match the group-by-group result
|
| 462 |
+
exp = {i: df.iloc[i::5].quantile(0.5) for i in range(5)}
|
| 463 |
+
expected = DataFrame(exp).T.infer_objects()
|
| 464 |
+
expected.index = expected.index.astype(int)
|
| 465 |
+
|
| 466 |
+
tm.assert_frame_equal(result, expected)
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def test_groupby_quantile_nonmulti_levels_order():
|
| 470 |
+
# Non-regression test for GH #53009
|
| 471 |
+
ind = pd.MultiIndex.from_tuples(
|
| 472 |
+
[
|
| 473 |
+
(0, "a", "B"),
|
| 474 |
+
(0, "a", "A"),
|
| 475 |
+
(0, "b", "B"),
|
| 476 |
+
(0, "b", "A"),
|
| 477 |
+
(1, "a", "B"),
|
| 478 |
+
(1, "a", "A"),
|
| 479 |
+
(1, "b", "B"),
|
| 480 |
+
(1, "b", "A"),
|
| 481 |
+
],
|
| 482 |
+
names=["sample", "cat0", "cat1"],
|
| 483 |
+
)
|
| 484 |
+
ser = pd.Series(range(8), index=ind)
|
| 485 |
+
result = ser.groupby(level="cat1", sort=False).quantile([0.2, 0.8])
|
| 486 |
+
|
| 487 |
+
qind = pd.MultiIndex.from_tuples(
|
| 488 |
+
[("B", 0.2), ("B", 0.8), ("A", 0.2), ("A", 0.8)], names=["cat1", None]
|
| 489 |
+
)
|
| 490 |
+
expected = pd.Series([1.2, 4.8, 2.2, 5.8], index=qind)
|
| 491 |
+
|
| 492 |
+
tm.assert_series_equal(result, expected)
|
| 493 |
+
|
| 494 |
+
# We need to check that index levels are not sorted
|
| 495 |
+
expected_levels = pd.core.indexes.frozen.FrozenList([["B", "A"], [0.2, 0.8]])
|
| 496 |
+
tm.assert_equal(result.index.levels, expected_levels)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_rank.py
ADDED
|
@@ -0,0 +1,721 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from pandas import (
|
| 8 |
+
DataFrame,
|
| 9 |
+
NaT,
|
| 10 |
+
Series,
|
| 11 |
+
concat,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def test_rank_unordered_categorical_typeerror():
|
| 17 |
+
# GH#51034 should be TypeError, not NotImplementedError
|
| 18 |
+
cat = pd.Categorical([], ordered=False)
|
| 19 |
+
ser = Series(cat)
|
| 20 |
+
df = ser.to_frame()
|
| 21 |
+
|
| 22 |
+
msg = "Cannot perform rank with non-ordered Categorical"
|
| 23 |
+
|
| 24 |
+
gb = ser.groupby(cat, observed=False)
|
| 25 |
+
with pytest.raises(TypeError, match=msg):
|
| 26 |
+
gb.rank()
|
| 27 |
+
|
| 28 |
+
gb2 = df.groupby(cat, observed=False)
|
| 29 |
+
with pytest.raises(TypeError, match=msg):
|
| 30 |
+
gb2.rank()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def test_rank_apply():
|
| 34 |
+
lev1 = np.array(["a" * 10] * 100, dtype=object)
|
| 35 |
+
lev2 = np.array(["b" * 10] * 130, dtype=object)
|
| 36 |
+
lab1 = np.random.default_rng(2).integers(0, 100, size=500, dtype=int)
|
| 37 |
+
lab2 = np.random.default_rng(2).integers(0, 130, size=500, dtype=int)
|
| 38 |
+
|
| 39 |
+
df = DataFrame(
|
| 40 |
+
{
|
| 41 |
+
"value": np.random.default_rng(2).standard_normal(500),
|
| 42 |
+
"key1": lev1.take(lab1),
|
| 43 |
+
"key2": lev2.take(lab2),
|
| 44 |
+
}
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
result = df.groupby(["key1", "key2"]).value.rank()
|
| 48 |
+
|
| 49 |
+
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
|
| 50 |
+
expected = concat(expected, axis=0)
|
| 51 |
+
expected = expected.reindex(result.index)
|
| 52 |
+
tm.assert_series_equal(result, expected)
|
| 53 |
+
|
| 54 |
+
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
|
| 55 |
+
|
| 56 |
+
expected = [
|
| 57 |
+
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
|
| 58 |
+
]
|
| 59 |
+
expected = concat(expected, axis=0)
|
| 60 |
+
expected = expected.reindex(result.index)
|
| 61 |
+
tm.assert_series_equal(result, expected)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
|
| 65 |
+
@pytest.mark.parametrize(
|
| 66 |
+
"vals",
|
| 67 |
+
[
|
| 68 |
+
np.array([2, 2, 8, 2, 6], dtype=dtype)
|
| 69 |
+
for dtype in ["i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "f8", "f4", "f2"]
|
| 70 |
+
]
|
| 71 |
+
+ [
|
| 72 |
+
[
|
| 73 |
+
pd.Timestamp("2018-01-02"),
|
| 74 |
+
pd.Timestamp("2018-01-02"),
|
| 75 |
+
pd.Timestamp("2018-01-08"),
|
| 76 |
+
pd.Timestamp("2018-01-02"),
|
| 77 |
+
pd.Timestamp("2018-01-06"),
|
| 78 |
+
],
|
| 79 |
+
[
|
| 80 |
+
pd.Timestamp("2018-01-02", tz="US/Pacific"),
|
| 81 |
+
pd.Timestamp("2018-01-02", tz="US/Pacific"),
|
| 82 |
+
pd.Timestamp("2018-01-08", tz="US/Pacific"),
|
| 83 |
+
pd.Timestamp("2018-01-02", tz="US/Pacific"),
|
| 84 |
+
pd.Timestamp("2018-01-06", tz="US/Pacific"),
|
| 85 |
+
],
|
| 86 |
+
[
|
| 87 |
+
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
|
| 88 |
+
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
|
| 89 |
+
pd.Timestamp("2018-01-08") - pd.Timestamp(0),
|
| 90 |
+
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
|
| 91 |
+
pd.Timestamp("2018-01-06") - pd.Timestamp(0),
|
| 92 |
+
],
|
| 93 |
+
[
|
| 94 |
+
pd.Timestamp("2018-01-02").to_period("D"),
|
| 95 |
+
pd.Timestamp("2018-01-02").to_period("D"),
|
| 96 |
+
pd.Timestamp("2018-01-08").to_period("D"),
|
| 97 |
+
pd.Timestamp("2018-01-02").to_period("D"),
|
| 98 |
+
pd.Timestamp("2018-01-06").to_period("D"),
|
| 99 |
+
],
|
| 100 |
+
],
|
| 101 |
+
ids=lambda x: type(x[0]),
|
| 102 |
+
)
|
| 103 |
+
@pytest.mark.parametrize(
|
| 104 |
+
"ties_method,ascending,pct,exp",
|
| 105 |
+
[
|
| 106 |
+
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
|
| 107 |
+
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
|
| 108 |
+
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
|
| 109 |
+
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
|
| 110 |
+
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
|
| 111 |
+
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
|
| 112 |
+
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
|
| 113 |
+
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
|
| 114 |
+
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
|
| 115 |
+
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
|
| 116 |
+
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
|
| 117 |
+
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
|
| 118 |
+
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
|
| 119 |
+
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
|
| 120 |
+
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
|
| 121 |
+
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
|
| 122 |
+
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
|
| 123 |
+
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
|
| 124 |
+
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
|
| 125 |
+
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
|
| 126 |
+
],
|
| 127 |
+
)
|
| 128 |
+
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
|
| 129 |
+
key = np.repeat(grps, len(vals))
|
| 130 |
+
|
| 131 |
+
orig_vals = vals
|
| 132 |
+
vals = list(vals) * len(grps)
|
| 133 |
+
if isinstance(orig_vals, np.ndarray):
|
| 134 |
+
vals = np.array(vals, dtype=orig_vals.dtype)
|
| 135 |
+
|
| 136 |
+
df = DataFrame({"key": key, "val": vals})
|
| 137 |
+
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
|
| 138 |
+
|
| 139 |
+
exp_df = DataFrame(exp * len(grps), columns=["val"])
|
| 140 |
+
tm.assert_frame_equal(result, exp_df)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
|
| 144 |
+
@pytest.mark.parametrize(
|
| 145 |
+
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
|
| 146 |
+
)
|
| 147 |
+
@pytest.mark.parametrize(
|
| 148 |
+
"ties_method,ascending,na_option,exp",
|
| 149 |
+
[
|
| 150 |
+
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
|
| 151 |
+
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
|
| 152 |
+
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
|
| 153 |
+
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
|
| 154 |
+
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
|
| 155 |
+
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
|
| 156 |
+
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
|
| 157 |
+
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
|
| 158 |
+
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
|
| 159 |
+
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
|
| 160 |
+
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
|
| 161 |
+
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
|
| 162 |
+
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
|
| 163 |
+
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
|
| 164 |
+
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
|
| 165 |
+
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
|
| 166 |
+
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
|
| 167 |
+
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
|
| 168 |
+
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
|
| 169 |
+
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
|
| 170 |
+
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
|
| 171 |
+
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
|
| 172 |
+
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
|
| 173 |
+
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
|
| 174 |
+
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
|
| 175 |
+
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
|
| 176 |
+
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
|
| 177 |
+
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
|
| 178 |
+
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
|
| 179 |
+
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
|
| 180 |
+
],
|
| 181 |
+
)
|
| 182 |
+
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
|
| 183 |
+
# GH 20561
|
| 184 |
+
key = np.repeat(grps, len(vals))
|
| 185 |
+
vals = vals * len(grps)
|
| 186 |
+
df = DataFrame({"key": key, "val": vals})
|
| 187 |
+
result = df.groupby("key").rank(
|
| 188 |
+
method=ties_method, ascending=ascending, na_option=na_option
|
| 189 |
+
)
|
| 190 |
+
exp_df = DataFrame(exp * len(grps), columns=["val"])
|
| 191 |
+
tm.assert_frame_equal(result, exp_df)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
|
| 195 |
+
@pytest.mark.parametrize(
|
| 196 |
+
"vals",
|
| 197 |
+
[
|
| 198 |
+
np.array([2, 2, np.nan, 8, 2, 6, np.nan, np.nan], dtype=dtype)
|
| 199 |
+
for dtype in ["f8", "f4", "f2"]
|
| 200 |
+
]
|
| 201 |
+
+ [
|
| 202 |
+
[
|
| 203 |
+
pd.Timestamp("2018-01-02"),
|
| 204 |
+
pd.Timestamp("2018-01-02"),
|
| 205 |
+
np.nan,
|
| 206 |
+
pd.Timestamp("2018-01-08"),
|
| 207 |
+
pd.Timestamp("2018-01-02"),
|
| 208 |
+
pd.Timestamp("2018-01-06"),
|
| 209 |
+
np.nan,
|
| 210 |
+
np.nan,
|
| 211 |
+
],
|
| 212 |
+
[
|
| 213 |
+
pd.Timestamp("2018-01-02", tz="US/Pacific"),
|
| 214 |
+
pd.Timestamp("2018-01-02", tz="US/Pacific"),
|
| 215 |
+
np.nan,
|
| 216 |
+
pd.Timestamp("2018-01-08", tz="US/Pacific"),
|
| 217 |
+
pd.Timestamp("2018-01-02", tz="US/Pacific"),
|
| 218 |
+
pd.Timestamp("2018-01-06", tz="US/Pacific"),
|
| 219 |
+
np.nan,
|
| 220 |
+
np.nan,
|
| 221 |
+
],
|
| 222 |
+
[
|
| 223 |
+
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
|
| 224 |
+
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
|
| 225 |
+
np.nan,
|
| 226 |
+
pd.Timestamp("2018-01-08") - pd.Timestamp(0),
|
| 227 |
+
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
|
| 228 |
+
pd.Timestamp("2018-01-06") - pd.Timestamp(0),
|
| 229 |
+
np.nan,
|
| 230 |
+
np.nan,
|
| 231 |
+
],
|
| 232 |
+
[
|
| 233 |
+
pd.Timestamp("2018-01-02").to_period("D"),
|
| 234 |
+
pd.Timestamp("2018-01-02").to_period("D"),
|
| 235 |
+
np.nan,
|
| 236 |
+
pd.Timestamp("2018-01-08").to_period("D"),
|
| 237 |
+
pd.Timestamp("2018-01-02").to_period("D"),
|
| 238 |
+
pd.Timestamp("2018-01-06").to_period("D"),
|
| 239 |
+
np.nan,
|
| 240 |
+
np.nan,
|
| 241 |
+
],
|
| 242 |
+
],
|
| 243 |
+
ids=lambda x: type(x[0]),
|
| 244 |
+
)
|
| 245 |
+
@pytest.mark.parametrize(
|
| 246 |
+
"ties_method,ascending,na_option,pct,exp",
|
| 247 |
+
[
|
| 248 |
+
(
|
| 249 |
+
"average",
|
| 250 |
+
True,
|
| 251 |
+
"keep",
|
| 252 |
+
False,
|
| 253 |
+
[2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
|
| 254 |
+
),
|
| 255 |
+
(
|
| 256 |
+
"average",
|
| 257 |
+
True,
|
| 258 |
+
"keep",
|
| 259 |
+
True,
|
| 260 |
+
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
|
| 261 |
+
),
|
| 262 |
+
(
|
| 263 |
+
"average",
|
| 264 |
+
False,
|
| 265 |
+
"keep",
|
| 266 |
+
False,
|
| 267 |
+
[4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
|
| 268 |
+
),
|
| 269 |
+
(
|
| 270 |
+
"average",
|
| 271 |
+
False,
|
| 272 |
+
"keep",
|
| 273 |
+
True,
|
| 274 |
+
[0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
|
| 275 |
+
),
|
| 276 |
+
("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
|
| 277 |
+
("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
|
| 278 |
+
(
|
| 279 |
+
"min",
|
| 280 |
+
False,
|
| 281 |
+
"keep",
|
| 282 |
+
False,
|
| 283 |
+
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
|
| 284 |
+
),
|
| 285 |
+
("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
|
| 286 |
+
("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
|
| 287 |
+
("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
|
| 288 |
+
(
|
| 289 |
+
"max",
|
| 290 |
+
False,
|
| 291 |
+
"keep",
|
| 292 |
+
False,
|
| 293 |
+
[5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
|
| 294 |
+
),
|
| 295 |
+
("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
|
| 296 |
+
(
|
| 297 |
+
"first",
|
| 298 |
+
True,
|
| 299 |
+
"keep",
|
| 300 |
+
False,
|
| 301 |
+
[1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
|
| 302 |
+
),
|
| 303 |
+
(
|
| 304 |
+
"first",
|
| 305 |
+
True,
|
| 306 |
+
"keep",
|
| 307 |
+
True,
|
| 308 |
+
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
|
| 309 |
+
),
|
| 310 |
+
(
|
| 311 |
+
"first",
|
| 312 |
+
False,
|
| 313 |
+
"keep",
|
| 314 |
+
False,
|
| 315 |
+
[3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
|
| 316 |
+
),
|
| 317 |
+
(
|
| 318 |
+
"first",
|
| 319 |
+
False,
|
| 320 |
+
"keep",
|
| 321 |
+
True,
|
| 322 |
+
[0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
|
| 323 |
+
),
|
| 324 |
+
(
|
| 325 |
+
"dense",
|
| 326 |
+
True,
|
| 327 |
+
"keep",
|
| 328 |
+
False,
|
| 329 |
+
[1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
|
| 330 |
+
),
|
| 331 |
+
(
|
| 332 |
+
"dense",
|
| 333 |
+
True,
|
| 334 |
+
"keep",
|
| 335 |
+
True,
|
| 336 |
+
[
|
| 337 |
+
1.0 / 3.0,
|
| 338 |
+
1.0 / 3.0,
|
| 339 |
+
np.nan,
|
| 340 |
+
3.0 / 3.0,
|
| 341 |
+
1.0 / 3.0,
|
| 342 |
+
2.0 / 3.0,
|
| 343 |
+
np.nan,
|
| 344 |
+
np.nan,
|
| 345 |
+
],
|
| 346 |
+
),
|
| 347 |
+
(
|
| 348 |
+
"dense",
|
| 349 |
+
False,
|
| 350 |
+
"keep",
|
| 351 |
+
False,
|
| 352 |
+
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
|
| 353 |
+
),
|
| 354 |
+
(
|
| 355 |
+
"dense",
|
| 356 |
+
False,
|
| 357 |
+
"keep",
|
| 358 |
+
True,
|
| 359 |
+
[
|
| 360 |
+
3.0 / 3.0,
|
| 361 |
+
3.0 / 3.0,
|
| 362 |
+
np.nan,
|
| 363 |
+
1.0 / 3.0,
|
| 364 |
+
3.0 / 3.0,
|
| 365 |
+
2.0 / 3.0,
|
| 366 |
+
np.nan,
|
| 367 |
+
np.nan,
|
| 368 |
+
],
|
| 369 |
+
),
|
| 370 |
+
("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
|
| 371 |
+
(
|
| 372 |
+
"average",
|
| 373 |
+
True,
|
| 374 |
+
"bottom",
|
| 375 |
+
True,
|
| 376 |
+
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
|
| 377 |
+
),
|
| 378 |
+
("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
|
| 379 |
+
(
|
| 380 |
+
"average",
|
| 381 |
+
False,
|
| 382 |
+
"bottom",
|
| 383 |
+
True,
|
| 384 |
+
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
|
| 385 |
+
),
|
| 386 |
+
("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
|
| 387 |
+
(
|
| 388 |
+
"min",
|
| 389 |
+
True,
|
| 390 |
+
"bottom",
|
| 391 |
+
True,
|
| 392 |
+
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
|
| 393 |
+
),
|
| 394 |
+
("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
|
| 395 |
+
(
|
| 396 |
+
"min",
|
| 397 |
+
False,
|
| 398 |
+
"bottom",
|
| 399 |
+
True,
|
| 400 |
+
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
|
| 401 |
+
),
|
| 402 |
+
("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
|
| 403 |
+
("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
|
| 404 |
+
("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
|
| 405 |
+
(
|
| 406 |
+
"max",
|
| 407 |
+
False,
|
| 408 |
+
"bottom",
|
| 409 |
+
True,
|
| 410 |
+
[0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
|
| 411 |
+
),
|
| 412 |
+
("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
|
| 413 |
+
(
|
| 414 |
+
"first",
|
| 415 |
+
True,
|
| 416 |
+
"bottom",
|
| 417 |
+
True,
|
| 418 |
+
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
|
| 419 |
+
),
|
| 420 |
+
("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
|
| 421 |
+
(
|
| 422 |
+
"first",
|
| 423 |
+
False,
|
| 424 |
+
"bottom",
|
| 425 |
+
True,
|
| 426 |
+
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
|
| 427 |
+
),
|
| 428 |
+
("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
|
| 429 |
+
("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
|
| 430 |
+
("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
|
| 431 |
+
("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
|
| 432 |
+
],
|
| 433 |
+
)
|
| 434 |
+
def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
|
| 435 |
+
key = np.repeat(grps, len(vals))
|
| 436 |
+
|
| 437 |
+
orig_vals = vals
|
| 438 |
+
vals = list(vals) * len(grps)
|
| 439 |
+
if isinstance(orig_vals, np.ndarray):
|
| 440 |
+
vals = np.array(vals, dtype=orig_vals.dtype)
|
| 441 |
+
|
| 442 |
+
df = DataFrame({"key": key, "val": vals})
|
| 443 |
+
result = df.groupby("key").rank(
|
| 444 |
+
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
|
| 445 |
+
)
|
| 446 |
+
|
| 447 |
+
exp_df = DataFrame(exp * len(grps), columns=["val"])
|
| 448 |
+
tm.assert_frame_equal(result, exp_df)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
@pytest.mark.parametrize(
|
| 452 |
+
"pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])]
|
| 453 |
+
)
|
| 454 |
+
def test_rank_resets_each_group(pct, exp):
|
| 455 |
+
df = DataFrame(
|
| 456 |
+
{"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10}
|
| 457 |
+
)
|
| 458 |
+
result = df.groupby("key").rank(pct=pct)
|
| 459 |
+
exp_df = DataFrame(exp * 2, columns=["val"])
|
| 460 |
+
tm.assert_frame_equal(result, exp_df)
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
@pytest.mark.parametrize(
|
| 464 |
+
"dtype", ["int64", "int32", "uint64", "uint32", "float64", "float32"]
|
| 465 |
+
)
|
| 466 |
+
@pytest.mark.parametrize("upper", [True, False])
|
| 467 |
+
def test_rank_avg_even_vals(dtype, upper):
|
| 468 |
+
if upper:
|
| 469 |
+
# use IntegerDtype/FloatingDtype
|
| 470 |
+
dtype = dtype[0].upper() + dtype[1:]
|
| 471 |
+
dtype = dtype.replace("Ui", "UI")
|
| 472 |
+
df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
|
| 473 |
+
df["val"] = df["val"].astype(dtype)
|
| 474 |
+
assert df["val"].dtype == dtype
|
| 475 |
+
|
| 476 |
+
result = df.groupby("key").rank()
|
| 477 |
+
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
|
| 478 |
+
if upper:
|
| 479 |
+
exp_df = exp_df.astype("Float64")
|
| 480 |
+
tm.assert_frame_equal(result, exp_df)
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
|
| 484 |
+
@pytest.mark.parametrize("ascending", [True, False])
|
| 485 |
+
@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
|
| 486 |
+
@pytest.mark.parametrize("pct", [True, False])
|
| 487 |
+
@pytest.mark.parametrize(
|
| 488 |
+
"vals", [["bar", "bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]]
|
| 489 |
+
)
|
| 490 |
+
def test_rank_object_dtype(ties_method, ascending, na_option, pct, vals):
|
| 491 |
+
df = DataFrame({"key": ["foo"] * 5, "val": vals})
|
| 492 |
+
mask = df["val"].isna()
|
| 493 |
+
|
| 494 |
+
gb = df.groupby("key")
|
| 495 |
+
res = gb.rank(method=ties_method, ascending=ascending, na_option=na_option, pct=pct)
|
| 496 |
+
|
| 497 |
+
# construct our expected by using numeric values with the same ordering
|
| 498 |
+
if mask.any():
|
| 499 |
+
df2 = DataFrame({"key": ["foo"] * 5, "val": [0, np.nan, 2, np.nan, 1]})
|
| 500 |
+
else:
|
| 501 |
+
df2 = DataFrame({"key": ["foo"] * 5, "val": [0, 0, 2, 0, 1]})
|
| 502 |
+
|
| 503 |
+
gb2 = df2.groupby("key")
|
| 504 |
+
alt = gb2.rank(
|
| 505 |
+
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
|
| 506 |
+
)
|
| 507 |
+
|
| 508 |
+
tm.assert_frame_equal(res, alt)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
@pytest.mark.parametrize("na_option", [True, "bad", 1])
|
| 512 |
+
@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
|
| 513 |
+
@pytest.mark.parametrize("ascending", [True, False])
|
| 514 |
+
@pytest.mark.parametrize("pct", [True, False])
|
| 515 |
+
@pytest.mark.parametrize(
|
| 516 |
+
"vals",
|
| 517 |
+
[
|
| 518 |
+
["bar", "bar", "foo", "bar", "baz"],
|
| 519 |
+
["bar", np.nan, "foo", np.nan, "baz"],
|
| 520 |
+
[1, np.nan, 2, np.nan, 3],
|
| 521 |
+
],
|
| 522 |
+
)
|
| 523 |
+
def test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals):
|
| 524 |
+
df = DataFrame({"key": ["foo"] * 5, "val": vals})
|
| 525 |
+
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
|
| 526 |
+
|
| 527 |
+
with pytest.raises(ValueError, match=msg):
|
| 528 |
+
df.groupby("key").rank(
|
| 529 |
+
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def test_rank_empty_group():
|
| 534 |
+
# see gh-22519
|
| 535 |
+
column = "A"
|
| 536 |
+
df = DataFrame({"A": [0, 1, 0], "B": [1.0, np.nan, 2.0]})
|
| 537 |
+
|
| 538 |
+
result = df.groupby(column).B.rank(pct=True)
|
| 539 |
+
expected = Series([0.5, np.nan, 1.0], name="B")
|
| 540 |
+
tm.assert_series_equal(result, expected)
|
| 541 |
+
|
| 542 |
+
result = df.groupby(column).rank(pct=True)
|
| 543 |
+
expected = DataFrame({"B": [0.5, np.nan, 1.0]})
|
| 544 |
+
tm.assert_frame_equal(result, expected)
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
@pytest.mark.parametrize(
|
| 548 |
+
"input_key,input_value,output_value",
|
| 549 |
+
[
|
| 550 |
+
([1, 2], [1, 1], [1.0, 1.0]),
|
| 551 |
+
([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]),
|
| 552 |
+
([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]),
|
| 553 |
+
([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan]),
|
| 554 |
+
],
|
| 555 |
+
)
|
| 556 |
+
def test_rank_zero_div(input_key, input_value, output_value):
|
| 557 |
+
# GH 23666
|
| 558 |
+
df = DataFrame({"A": input_key, "B": input_value})
|
| 559 |
+
|
| 560 |
+
result = df.groupby("A").rank(method="dense", pct=True)
|
| 561 |
+
expected = DataFrame({"B": output_value})
|
| 562 |
+
tm.assert_frame_equal(result, expected)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
def test_rank_min_int():
|
| 566 |
+
# GH-32859
|
| 567 |
+
df = DataFrame(
|
| 568 |
+
{
|
| 569 |
+
"grp": [1, 1, 2],
|
| 570 |
+
"int_col": [
|
| 571 |
+
np.iinfo(np.int64).min,
|
| 572 |
+
np.iinfo(np.int64).max,
|
| 573 |
+
np.iinfo(np.int64).min,
|
| 574 |
+
],
|
| 575 |
+
"datetimelike": [NaT, datetime(2001, 1, 1), NaT],
|
| 576 |
+
}
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
result = df.groupby("grp").rank()
|
| 580 |
+
expected = DataFrame(
|
| 581 |
+
{"int_col": [1.0, 2.0, 1.0], "datetimelike": [np.nan, 1.0, np.nan]}
|
| 582 |
+
)
|
| 583 |
+
|
| 584 |
+
tm.assert_frame_equal(result, expected)
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
@pytest.mark.parametrize("use_nan", [True, False])
|
| 588 |
+
def test_rank_pct_equal_values_on_group_transition(use_nan):
|
| 589 |
+
# GH#40518
|
| 590 |
+
fill_value = np.nan if use_nan else 3
|
| 591 |
+
df = DataFrame(
|
| 592 |
+
[
|
| 593 |
+
[-1, 1],
|
| 594 |
+
[-1, 2],
|
| 595 |
+
[1, fill_value],
|
| 596 |
+
[-1, fill_value],
|
| 597 |
+
],
|
| 598 |
+
columns=["group", "val"],
|
| 599 |
+
)
|
| 600 |
+
result = df.groupby(["group"])["val"].rank(
|
| 601 |
+
method="dense",
|
| 602 |
+
pct=True,
|
| 603 |
+
)
|
| 604 |
+
if use_nan:
|
| 605 |
+
expected = Series([0.5, 1, np.nan, np.nan], name="val")
|
| 606 |
+
else:
|
| 607 |
+
expected = Series([1 / 3, 2 / 3, 1, 1], name="val")
|
| 608 |
+
|
| 609 |
+
tm.assert_series_equal(result, expected)
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def test_rank_multiindex():
|
| 613 |
+
# GH27721
|
| 614 |
+
df = concat(
|
| 615 |
+
{
|
| 616 |
+
"a": DataFrame({"col1": [3, 4], "col2": [1, 2]}),
|
| 617 |
+
"b": DataFrame({"col3": [5, 6], "col4": [7, 8]}),
|
| 618 |
+
},
|
| 619 |
+
axis=1,
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 623 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 624 |
+
gb = df.groupby(level=0, axis=1)
|
| 625 |
+
msg = "DataFrameGroupBy.rank with axis=1 is deprecated"
|
| 626 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 627 |
+
result = gb.rank(axis=1)
|
| 628 |
+
|
| 629 |
+
expected = concat(
|
| 630 |
+
[
|
| 631 |
+
df["a"].rank(axis=1),
|
| 632 |
+
df["b"].rank(axis=1),
|
| 633 |
+
],
|
| 634 |
+
axis=1,
|
| 635 |
+
keys=["a", "b"],
|
| 636 |
+
)
|
| 637 |
+
tm.assert_frame_equal(result, expected)
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def test_groupby_axis0_rank_axis1():
|
| 641 |
+
# GH#41320
|
| 642 |
+
df = DataFrame(
|
| 643 |
+
{0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]},
|
| 644 |
+
index=["a", "a", "b", "b"],
|
| 645 |
+
)
|
| 646 |
+
msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
|
| 647 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 648 |
+
gb = df.groupby(level=0, axis=0)
|
| 649 |
+
|
| 650 |
+
msg = "DataFrameGroupBy.rank with axis=1 is deprecated"
|
| 651 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 652 |
+
res = gb.rank(axis=1)
|
| 653 |
+
|
| 654 |
+
# This should match what we get when "manually" operating group-by-group
|
| 655 |
+
expected = concat([df.loc["a"].rank(axis=1), df.loc["b"].rank(axis=1)], axis=0)
|
| 656 |
+
tm.assert_frame_equal(res, expected)
|
| 657 |
+
|
| 658 |
+
# check that we haven't accidentally written a case that coincidentally
|
| 659 |
+
# matches rank(axis=0)
|
| 660 |
+
msg = "The 'axis' keyword in DataFrameGroupBy.rank"
|
| 661 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 662 |
+
alt = gb.rank(axis=0)
|
| 663 |
+
assert not alt.equals(expected)
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def test_groupby_axis0_cummax_axis1():
|
| 667 |
+
# case where groupby axis is 0 and axis keyword in transform is 1
|
| 668 |
+
|
| 669 |
+
# df has mixed dtype -> multiple blocks
|
| 670 |
+
df = DataFrame(
|
| 671 |
+
{0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]},
|
| 672 |
+
index=["a", "a", "b", "b"],
|
| 673 |
+
)
|
| 674 |
+
msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
|
| 675 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 676 |
+
gb = df.groupby(level=0, axis=0)
|
| 677 |
+
|
| 678 |
+
msg = "DataFrameGroupBy.cummax with axis=1 is deprecated"
|
| 679 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 680 |
+
cmax = gb.cummax(axis=1)
|
| 681 |
+
expected = df[[0, 1]].astype(np.float64)
|
| 682 |
+
expected[2] = expected[1]
|
| 683 |
+
tm.assert_frame_equal(cmax, expected)
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
def test_non_unique_index():
|
| 687 |
+
# GH 16577
|
| 688 |
+
df = DataFrame(
|
| 689 |
+
{"A": [1.0, 2.0, 3.0, np.nan], "value": 1.0},
|
| 690 |
+
index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
|
| 691 |
+
)
|
| 692 |
+
result = df.groupby([df.index, "A"]).value.rank(ascending=True, pct=True)
|
| 693 |
+
expected = Series(
|
| 694 |
+
[1.0, 1.0, 1.0, np.nan],
|
| 695 |
+
index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
|
| 696 |
+
name="value",
|
| 697 |
+
)
|
| 698 |
+
tm.assert_series_equal(result, expected)
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
def test_rank_categorical():
|
| 702 |
+
cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True)
|
| 703 |
+
cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True)
|
| 704 |
+
|
| 705 |
+
df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2})
|
| 706 |
+
|
| 707 |
+
gb = df.groupby("col1")
|
| 708 |
+
|
| 709 |
+
res = gb.rank()
|
| 710 |
+
|
| 711 |
+
expected = df.astype(object).groupby("col1").rank()
|
| 712 |
+
tm.assert_frame_equal(res, expected)
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
@pytest.mark.parametrize("na_option", ["top", "bottom"])
|
| 716 |
+
def test_groupby_op_with_nullables(na_option):
|
| 717 |
+
# GH 54206
|
| 718 |
+
df = DataFrame({"x": [None]}, dtype="Float64")
|
| 719 |
+
result = df.groupby("x", dropna=False)["x"].rank(method="min", na_option=na_option)
|
| 720 |
+
expected = Series([1.0], dtype="Float64", name=result.name)
|
| 721 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_sample.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
DataFrame,
|
| 5 |
+
Index,
|
| 6 |
+
Series,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
|
| 12 |
+
def test_groupby_sample_balanced_groups_shape(n, frac):
|
| 13 |
+
values = [1] * 10 + [2] * 10
|
| 14 |
+
df = DataFrame({"a": values, "b": values})
|
| 15 |
+
|
| 16 |
+
result = df.groupby("a").sample(n=n, frac=frac)
|
| 17 |
+
values = [1] * 2 + [2] * 2
|
| 18 |
+
expected = DataFrame({"a": values, "b": values}, index=result.index)
|
| 19 |
+
tm.assert_frame_equal(result, expected)
|
| 20 |
+
|
| 21 |
+
result = df.groupby("a")["b"].sample(n=n, frac=frac)
|
| 22 |
+
expected = Series(values, name="b", index=result.index)
|
| 23 |
+
tm.assert_series_equal(result, expected)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def test_groupby_sample_unbalanced_groups_shape():
|
| 27 |
+
values = [1] * 10 + [2] * 20
|
| 28 |
+
df = DataFrame({"a": values, "b": values})
|
| 29 |
+
|
| 30 |
+
result = df.groupby("a").sample(n=5)
|
| 31 |
+
values = [1] * 5 + [2] * 5
|
| 32 |
+
expected = DataFrame({"a": values, "b": values}, index=result.index)
|
| 33 |
+
tm.assert_frame_equal(result, expected)
|
| 34 |
+
|
| 35 |
+
result = df.groupby("a")["b"].sample(n=5)
|
| 36 |
+
expected = Series(values, name="b", index=result.index)
|
| 37 |
+
tm.assert_series_equal(result, expected)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def test_groupby_sample_index_value_spans_groups():
|
| 41 |
+
values = [1] * 3 + [2] * 3
|
| 42 |
+
df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
|
| 43 |
+
|
| 44 |
+
result = df.groupby("a").sample(n=2)
|
| 45 |
+
values = [1] * 2 + [2] * 2
|
| 46 |
+
expected = DataFrame({"a": values, "b": values}, index=result.index)
|
| 47 |
+
tm.assert_frame_equal(result, expected)
|
| 48 |
+
|
| 49 |
+
result = df.groupby("a")["b"].sample(n=2)
|
| 50 |
+
expected = Series(values, name="b", index=result.index)
|
| 51 |
+
tm.assert_series_equal(result, expected)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def test_groupby_sample_n_and_frac_raises():
|
| 55 |
+
df = DataFrame({"a": [1, 2], "b": [1, 2]})
|
| 56 |
+
msg = "Please enter a value for `frac` OR `n`, not both"
|
| 57 |
+
|
| 58 |
+
with pytest.raises(ValueError, match=msg):
|
| 59 |
+
df.groupby("a").sample(n=1, frac=1.0)
|
| 60 |
+
|
| 61 |
+
with pytest.raises(ValueError, match=msg):
|
| 62 |
+
df.groupby("a")["b"].sample(n=1, frac=1.0)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def test_groupby_sample_frac_gt_one_without_replacement_raises():
|
| 66 |
+
df = DataFrame({"a": [1, 2], "b": [1, 2]})
|
| 67 |
+
msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."
|
| 68 |
+
|
| 69 |
+
with pytest.raises(ValueError, match=msg):
|
| 70 |
+
df.groupby("a").sample(frac=1.5, replace=False)
|
| 71 |
+
|
| 72 |
+
with pytest.raises(ValueError, match=msg):
|
| 73 |
+
df.groupby("a")["b"].sample(frac=1.5, replace=False)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@pytest.mark.parametrize("n", [-1, 1.5])
|
| 77 |
+
def test_groupby_sample_invalid_n_raises(n):
|
| 78 |
+
df = DataFrame({"a": [1, 2], "b": [1, 2]})
|
| 79 |
+
|
| 80 |
+
if n < 0:
|
| 81 |
+
msg = "A negative number of rows requested. Please provide `n` >= 0."
|
| 82 |
+
else:
|
| 83 |
+
msg = "Only integers accepted as `n` values"
|
| 84 |
+
|
| 85 |
+
with pytest.raises(ValueError, match=msg):
|
| 86 |
+
df.groupby("a").sample(n=n)
|
| 87 |
+
|
| 88 |
+
with pytest.raises(ValueError, match=msg):
|
| 89 |
+
df.groupby("a")["b"].sample(n=n)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def test_groupby_sample_oversample():
|
| 93 |
+
values = [1] * 10 + [2] * 10
|
| 94 |
+
df = DataFrame({"a": values, "b": values})
|
| 95 |
+
|
| 96 |
+
result = df.groupby("a").sample(frac=2.0, replace=True)
|
| 97 |
+
values = [1] * 20 + [2] * 20
|
| 98 |
+
expected = DataFrame({"a": values, "b": values}, index=result.index)
|
| 99 |
+
tm.assert_frame_equal(result, expected)
|
| 100 |
+
|
| 101 |
+
result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
|
| 102 |
+
expected = Series(values, name="b", index=result.index)
|
| 103 |
+
tm.assert_series_equal(result, expected)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def test_groupby_sample_without_n_or_frac():
|
| 107 |
+
values = [1] * 10 + [2] * 10
|
| 108 |
+
df = DataFrame({"a": values, "b": values})
|
| 109 |
+
|
| 110 |
+
result = df.groupby("a").sample(n=None, frac=None)
|
| 111 |
+
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
|
| 112 |
+
tm.assert_frame_equal(result, expected)
|
| 113 |
+
|
| 114 |
+
result = df.groupby("a")["b"].sample(n=None, frac=None)
|
| 115 |
+
expected = Series([1, 2], name="b", index=result.index)
|
| 116 |
+
tm.assert_series_equal(result, expected)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@pytest.mark.parametrize(
|
| 120 |
+
"index, expected_index",
|
| 121 |
+
[(["w", "x", "y", "z"], ["w", "w", "y", "y"]), ([3, 4, 5, 6], [3, 3, 5, 5])],
|
| 122 |
+
)
|
| 123 |
+
def test_groupby_sample_with_weights(index, expected_index):
|
| 124 |
+
# GH 39927 - tests for integer index needed
|
| 125 |
+
values = [1] * 2 + [2] * 2
|
| 126 |
+
df = DataFrame({"a": values, "b": values}, index=Index(index))
|
| 127 |
+
|
| 128 |
+
result = df.groupby("a").sample(n=2, replace=True, weights=[1, 0, 1, 0])
|
| 129 |
+
expected = DataFrame({"a": values, "b": values}, index=Index(expected_index))
|
| 130 |
+
tm.assert_frame_equal(result, expected)
|
| 131 |
+
|
| 132 |
+
result = df.groupby("a")["b"].sample(n=2, replace=True, weights=[1, 0, 1, 0])
|
| 133 |
+
expected = Series(values, name="b", index=Index(expected_index))
|
| 134 |
+
tm.assert_series_equal(result, expected)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def test_groupby_sample_with_selections():
|
| 138 |
+
# GH 39928
|
| 139 |
+
values = [1] * 10 + [2] * 10
|
| 140 |
+
df = DataFrame({"a": values, "b": values, "c": values})
|
| 141 |
+
|
| 142 |
+
result = df.groupby("a")[["b", "c"]].sample(n=None, frac=None)
|
| 143 |
+
expected = DataFrame({"b": [1, 2], "c": [1, 2]}, index=result.index)
|
| 144 |
+
tm.assert_frame_equal(result, expected)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def test_groupby_sample_with_empty_inputs():
|
| 148 |
+
# GH48459
|
| 149 |
+
df = DataFrame({"a": [], "b": []})
|
| 150 |
+
groupby_df = df.groupby("a")
|
| 151 |
+
|
| 152 |
+
result = groupby_df.sample()
|
| 153 |
+
expected = df
|
| 154 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_size.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
from pandas.core.dtypes.common import is_integer_dtype
|
| 7 |
+
|
| 8 |
+
from pandas import (
|
| 9 |
+
DataFrame,
|
| 10 |
+
Index,
|
| 11 |
+
PeriodIndex,
|
| 12 |
+
Series,
|
| 13 |
+
)
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]])
|
| 18 |
+
def test_size(df, by):
|
| 19 |
+
grouped = df.groupby(by=by)
|
| 20 |
+
result = grouped.size()
|
| 21 |
+
for key, group in grouped:
|
| 22 |
+
assert result[key] == len(group)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.mark.parametrize(
|
| 26 |
+
"by",
|
| 27 |
+
[
|
| 28 |
+
[0, 0, 0, 0],
|
| 29 |
+
[0, 1, 1, 1],
|
| 30 |
+
[1, 0, 1, 1],
|
| 31 |
+
[0, None, None, None],
|
| 32 |
+
pytest.param([None, None, None, None], marks=pytest.mark.xfail),
|
| 33 |
+
],
|
| 34 |
+
)
|
| 35 |
+
def test_size_axis_1(df, axis_1, by, sort, dropna):
|
| 36 |
+
# GH#45715
|
| 37 |
+
counts = {key: sum(value == key for value in by) for key in dict.fromkeys(by)}
|
| 38 |
+
if dropna:
|
| 39 |
+
counts = {key: value for key, value in counts.items() if key is not None}
|
| 40 |
+
expected = Series(counts, dtype="int64")
|
| 41 |
+
if sort:
|
| 42 |
+
expected = expected.sort_index()
|
| 43 |
+
if is_integer_dtype(expected.index.dtype) and not any(x is None for x in by):
|
| 44 |
+
expected.index = expected.index.astype(int)
|
| 45 |
+
|
| 46 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 47 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 48 |
+
grouped = df.groupby(by=by, axis=axis_1, sort=sort, dropna=dropna)
|
| 49 |
+
result = grouped.size()
|
| 50 |
+
tm.assert_series_equal(result, expected)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]])
|
| 54 |
+
@pytest.mark.parametrize("sort", [True, False])
|
| 55 |
+
def test_size_sort(sort, by):
|
| 56 |
+
df = DataFrame(np.random.default_rng(2).choice(20, (1000, 3)), columns=list("ABC"))
|
| 57 |
+
left = df.groupby(by=by, sort=sort).size()
|
| 58 |
+
right = df.groupby(by=by, sort=sort)["C"].apply(lambda a: a.shape[0])
|
| 59 |
+
tm.assert_series_equal(left, right, check_names=False)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def test_size_series_dataframe():
|
| 63 |
+
# https://github.com/pandas-dev/pandas/issues/11699
|
| 64 |
+
df = DataFrame(columns=["A", "B"])
|
| 65 |
+
out = Series(dtype="int64", index=Index([], name="A"))
|
| 66 |
+
tm.assert_series_equal(df.groupby("A").size(), out)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def test_size_groupby_all_null():
|
| 70 |
+
# https://github.com/pandas-dev/pandas/issues/23050
|
| 71 |
+
# Assert no 'Value Error : Length of passed values is 2, index implies 0'
|
| 72 |
+
df = DataFrame({"A": [None, None]}) # all-null groups
|
| 73 |
+
result = df.groupby("A").size()
|
| 74 |
+
expected = Series(dtype="int64", index=Index([], name="A"))
|
| 75 |
+
tm.assert_series_equal(result, expected)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def test_size_period_index():
|
| 79 |
+
# https://github.com/pandas-dev/pandas/issues/34010
|
| 80 |
+
ser = Series([1], index=PeriodIndex(["2000"], name="A", freq="D"))
|
| 81 |
+
grp = ser.groupby(level="A")
|
| 82 |
+
result = grp.size()
|
| 83 |
+
tm.assert_series_equal(result, ser)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 87 |
+
def test_size_on_categorical(as_index):
|
| 88 |
+
df = DataFrame([[1, 1], [2, 2]], columns=["A", "B"])
|
| 89 |
+
df["A"] = df["A"].astype("category")
|
| 90 |
+
result = df.groupby(["A", "B"], as_index=as_index, observed=False).size()
|
| 91 |
+
|
| 92 |
+
expected = DataFrame(
|
| 93 |
+
[[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"]
|
| 94 |
+
)
|
| 95 |
+
expected["A"] = expected["A"].astype("category")
|
| 96 |
+
if as_index:
|
| 97 |
+
expected = expected.set_index(["A", "B"])["size"].rename(None)
|
| 98 |
+
|
| 99 |
+
tm.assert_equal(result, expected)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
|
| 103 |
+
def test_size_series_masked_type_returns_Int64(dtype):
|
| 104 |
+
# GH 54132
|
| 105 |
+
ser = Series([1, 1, 1], index=["a", "a", "b"], dtype=dtype)
|
| 106 |
+
result = ser.groupby(level=0).size()
|
| 107 |
+
expected = Series([2, 1], dtype="Int64", index=["a", "b"])
|
| 108 |
+
tm.assert_series_equal(result, expected)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@pytest.mark.parametrize(
|
| 112 |
+
"dtype",
|
| 113 |
+
[
|
| 114 |
+
object,
|
| 115 |
+
pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")),
|
| 116 |
+
pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
|
| 117 |
+
],
|
| 118 |
+
)
|
| 119 |
+
def test_size_strings(dtype):
|
| 120 |
+
# GH#55627
|
| 121 |
+
df = DataFrame({"a": ["a", "a", "b"], "b": "a"}, dtype=dtype)
|
| 122 |
+
result = df.groupby("a")["b"].size()
|
| 123 |
+
exp_dtype = "Int64" if dtype == "string[pyarrow]" else "int64"
|
| 124 |
+
expected = Series(
|
| 125 |
+
[2, 1],
|
| 126 |
+
index=Index(["a", "b"], name="a", dtype=dtype),
|
| 127 |
+
name="b",
|
| 128 |
+
dtype=exp_dtype,
|
| 129 |
+
)
|
| 130 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_skew.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_groupby_skew_equivalence():
|
| 8 |
+
# Test that that groupby skew method (which uses libgroupby.group_skew)
|
| 9 |
+
# matches the results of operating group-by-group (which uses nanops.nanskew)
|
| 10 |
+
nrows = 1000
|
| 11 |
+
ngroups = 3
|
| 12 |
+
ncols = 2
|
| 13 |
+
nan_frac = 0.05
|
| 14 |
+
|
| 15 |
+
arr = np.random.default_rng(2).standard_normal((nrows, ncols))
|
| 16 |
+
arr[np.random.default_rng(2).random(nrows) < nan_frac] = np.nan
|
| 17 |
+
|
| 18 |
+
df = pd.DataFrame(arr)
|
| 19 |
+
grps = np.random.default_rng(2).integers(0, ngroups, size=nrows)
|
| 20 |
+
gb = df.groupby(grps)
|
| 21 |
+
|
| 22 |
+
result = gb.skew()
|
| 23 |
+
|
| 24 |
+
grpwise = [grp.skew().to_frame(i).T for i, grp in gb]
|
| 25 |
+
expected = pd.concat(grpwise, axis=0)
|
| 26 |
+
expected.index = expected.index.astype(result.index.dtype) # 32bit builds
|
| 27 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/methods/test_value_counts.py
ADDED
|
@@ -0,0 +1,1241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
these are systematically testing all of the args to value_counts
|
| 3 |
+
with different size combinations. This is to ensure stability of the sorting
|
| 4 |
+
and proper parameter handling
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
|
| 11 |
+
import pandas.util._test_decorators as td
|
| 12 |
+
|
| 13 |
+
from pandas import (
|
| 14 |
+
Categorical,
|
| 15 |
+
CategoricalIndex,
|
| 16 |
+
DataFrame,
|
| 17 |
+
Grouper,
|
| 18 |
+
Index,
|
| 19 |
+
MultiIndex,
|
| 20 |
+
Series,
|
| 21 |
+
date_range,
|
| 22 |
+
to_datetime,
|
| 23 |
+
)
|
| 24 |
+
import pandas._testing as tm
|
| 25 |
+
from pandas.util.version import Version
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def tests_value_counts_index_names_category_column():
|
| 29 |
+
# GH44324 Missing name of index category column
|
| 30 |
+
df = DataFrame(
|
| 31 |
+
{
|
| 32 |
+
"gender": ["female"],
|
| 33 |
+
"country": ["US"],
|
| 34 |
+
}
|
| 35 |
+
)
|
| 36 |
+
df["gender"] = df["gender"].astype("category")
|
| 37 |
+
result = df.groupby("country")["gender"].value_counts()
|
| 38 |
+
|
| 39 |
+
# Construct expected, very specific multiindex
|
| 40 |
+
df_mi_expected = DataFrame([["US", "female"]], columns=["country", "gender"])
|
| 41 |
+
df_mi_expected["gender"] = df_mi_expected["gender"].astype("category")
|
| 42 |
+
mi_expected = MultiIndex.from_frame(df_mi_expected)
|
| 43 |
+
expected = Series([1], index=mi_expected, name="count")
|
| 44 |
+
|
| 45 |
+
tm.assert_series_equal(result, expected)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def seed_df(seed_nans, n, m):
|
| 49 |
+
days = date_range("2015-08-24", periods=10)
|
| 50 |
+
|
| 51 |
+
frame = DataFrame(
|
| 52 |
+
{
|
| 53 |
+
"1st": np.random.default_rng(2).choice(list("abcd"), n),
|
| 54 |
+
"2nd": np.random.default_rng(2).choice(days, n),
|
| 55 |
+
"3rd": np.random.default_rng(2).integers(1, m + 1, n),
|
| 56 |
+
}
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
if seed_nans:
|
| 60 |
+
# Explicitly cast to float to avoid implicit cast when setting nan
|
| 61 |
+
frame["3rd"] = frame["3rd"].astype("float")
|
| 62 |
+
frame.loc[1::11, "1st"] = np.nan
|
| 63 |
+
frame.loc[3::17, "2nd"] = np.nan
|
| 64 |
+
frame.loc[7::19, "3rd"] = np.nan
|
| 65 |
+
frame.loc[8::19, "3rd"] = np.nan
|
| 66 |
+
frame.loc[9::19, "3rd"] = np.nan
|
| 67 |
+
|
| 68 |
+
return frame
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@pytest.mark.slow
|
| 72 |
+
@pytest.mark.parametrize("seed_nans", [True, False])
|
| 73 |
+
@pytest.mark.parametrize("num_rows", [10, 50])
|
| 74 |
+
@pytest.mark.parametrize("max_int", [5, 20])
|
| 75 |
+
@pytest.mark.parametrize("keys", ["1st", "2nd", ["1st", "2nd"]], ids=repr)
|
| 76 |
+
@pytest.mark.parametrize("bins", [None, [0, 5]], ids=repr)
|
| 77 |
+
@pytest.mark.parametrize("isort", [True, False])
|
| 78 |
+
@pytest.mark.parametrize("normalize, name", [(True, "proportion"), (False, "count")])
|
| 79 |
+
@pytest.mark.parametrize("sort", [True, False])
|
| 80 |
+
@pytest.mark.parametrize("ascending", [True, False])
|
| 81 |
+
@pytest.mark.parametrize("dropna", [True, False])
|
| 82 |
+
def test_series_groupby_value_counts(
|
| 83 |
+
seed_nans,
|
| 84 |
+
num_rows,
|
| 85 |
+
max_int,
|
| 86 |
+
keys,
|
| 87 |
+
bins,
|
| 88 |
+
isort,
|
| 89 |
+
normalize,
|
| 90 |
+
name,
|
| 91 |
+
sort,
|
| 92 |
+
ascending,
|
| 93 |
+
dropna,
|
| 94 |
+
):
|
| 95 |
+
df = seed_df(seed_nans, num_rows, max_int)
|
| 96 |
+
|
| 97 |
+
def rebuild_index(df):
|
| 98 |
+
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
|
| 99 |
+
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
|
| 100 |
+
return df
|
| 101 |
+
|
| 102 |
+
kwargs = {
|
| 103 |
+
"normalize": normalize,
|
| 104 |
+
"sort": sort,
|
| 105 |
+
"ascending": ascending,
|
| 106 |
+
"dropna": dropna,
|
| 107 |
+
"bins": bins,
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
gr = df.groupby(keys, sort=isort)
|
| 111 |
+
left = gr["3rd"].value_counts(**kwargs)
|
| 112 |
+
|
| 113 |
+
gr = df.groupby(keys, sort=isort)
|
| 114 |
+
right = gr["3rd"].apply(Series.value_counts, **kwargs)
|
| 115 |
+
right.index.names = right.index.names[:-1] + ["3rd"]
|
| 116 |
+
# https://github.com/pandas-dev/pandas/issues/49909
|
| 117 |
+
right = right.rename(name)
|
| 118 |
+
|
| 119 |
+
# have to sort on index because of unstable sort on values
|
| 120 |
+
left, right = map(rebuild_index, (left, right)) # xref GH9212
|
| 121 |
+
tm.assert_series_equal(left.sort_index(), right.sort_index())
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
@pytest.mark.parametrize("utc", [True, False])
|
| 125 |
+
def test_series_groupby_value_counts_with_grouper(utc):
|
| 126 |
+
# GH28479
|
| 127 |
+
df = DataFrame(
|
| 128 |
+
{
|
| 129 |
+
"Timestamp": [
|
| 130 |
+
1565083561,
|
| 131 |
+
1565083561 + 86400,
|
| 132 |
+
1565083561 + 86500,
|
| 133 |
+
1565083561 + 86400 * 2,
|
| 134 |
+
1565083561 + 86400 * 3,
|
| 135 |
+
1565083561 + 86500 * 3,
|
| 136 |
+
1565083561 + 86400 * 4,
|
| 137 |
+
],
|
| 138 |
+
"Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],
|
| 139 |
+
}
|
| 140 |
+
).drop([3])
|
| 141 |
+
|
| 142 |
+
df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s")
|
| 143 |
+
dfg = df.groupby(Grouper(freq="1D", key="Datetime"))
|
| 144 |
+
|
| 145 |
+
# have to sort on index because of unstable sort on values xref GH9212
|
| 146 |
+
result = dfg["Food"].value_counts().sort_index()
|
| 147 |
+
expected = dfg["Food"].apply(Series.value_counts).sort_index()
|
| 148 |
+
expected.index.names = result.index.names
|
| 149 |
+
# https://github.com/pandas-dev/pandas/issues/49909
|
| 150 |
+
expected = expected.rename("count")
|
| 151 |
+
|
| 152 |
+
tm.assert_series_equal(result, expected)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])
|
| 156 |
+
def test_series_groupby_value_counts_empty(columns):
|
| 157 |
+
# GH39172
|
| 158 |
+
df = DataFrame(columns=columns)
|
| 159 |
+
dfg = df.groupby(columns[:-1])
|
| 160 |
+
|
| 161 |
+
result = dfg[columns[-1]].value_counts()
|
| 162 |
+
expected = Series([], dtype=result.dtype, name="count")
|
| 163 |
+
expected.index = MultiIndex.from_arrays([[]] * len(columns), names=columns)
|
| 164 |
+
|
| 165 |
+
tm.assert_series_equal(result, expected)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])
|
| 169 |
+
def test_series_groupby_value_counts_one_row(columns):
|
| 170 |
+
# GH42618
|
| 171 |
+
df = DataFrame(data=[range(len(columns))], columns=columns)
|
| 172 |
+
dfg = df.groupby(columns[:-1])
|
| 173 |
+
|
| 174 |
+
result = dfg[columns[-1]].value_counts()
|
| 175 |
+
expected = df.value_counts()
|
| 176 |
+
|
| 177 |
+
tm.assert_series_equal(result, expected)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_series_groupby_value_counts_on_categorical():
|
| 181 |
+
# GH38672
|
| 182 |
+
|
| 183 |
+
s = Series(Categorical(["a"], categories=["a", "b"]))
|
| 184 |
+
result = s.groupby([0]).value_counts()
|
| 185 |
+
|
| 186 |
+
expected = Series(
|
| 187 |
+
data=[1, 0],
|
| 188 |
+
index=MultiIndex.from_arrays(
|
| 189 |
+
[
|
| 190 |
+
np.array([0, 0]),
|
| 191 |
+
CategoricalIndex(
|
| 192 |
+
["a", "b"], categories=["a", "b"], ordered=False, dtype="category"
|
| 193 |
+
),
|
| 194 |
+
]
|
| 195 |
+
),
|
| 196 |
+
name="count",
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
# Expected:
|
| 200 |
+
# 0 a 1
|
| 201 |
+
# b 0
|
| 202 |
+
# dtype: int64
|
| 203 |
+
|
| 204 |
+
tm.assert_series_equal(result, expected)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def test_series_groupby_value_counts_no_sort():
|
| 208 |
+
# GH#50482
|
| 209 |
+
df = DataFrame(
|
| 210 |
+
{
|
| 211 |
+
"gender": ["male", "male", "female", "male", "female", "male"],
|
| 212 |
+
"education": ["low", "medium", "high", "low", "high", "low"],
|
| 213 |
+
"country": ["US", "FR", "US", "FR", "FR", "FR"],
|
| 214 |
+
}
|
| 215 |
+
)
|
| 216 |
+
gb = df.groupby(["country", "gender"], sort=False)["education"]
|
| 217 |
+
result = gb.value_counts(sort=False)
|
| 218 |
+
index = MultiIndex(
|
| 219 |
+
levels=[["US", "FR"], ["male", "female"], ["low", "medium", "high"]],
|
| 220 |
+
codes=[[0, 1, 0, 1, 1], [0, 0, 1, 0, 1], [0, 1, 2, 0, 2]],
|
| 221 |
+
names=["country", "gender", "education"],
|
| 222 |
+
)
|
| 223 |
+
expected = Series([1, 1, 1, 2, 1], index=index, name="count")
|
| 224 |
+
tm.assert_series_equal(result, expected)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
@pytest.fixture
|
| 228 |
+
def education_df():
|
| 229 |
+
return DataFrame(
|
| 230 |
+
{
|
| 231 |
+
"gender": ["male", "male", "female", "male", "female", "male"],
|
| 232 |
+
"education": ["low", "medium", "high", "low", "high", "low"],
|
| 233 |
+
"country": ["US", "FR", "US", "FR", "FR", "FR"],
|
| 234 |
+
}
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def test_axis(education_df):
|
| 239 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 240 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 241 |
+
gp = education_df.groupby("country", axis=1)
|
| 242 |
+
with pytest.raises(NotImplementedError, match="axis"):
|
| 243 |
+
gp.value_counts()
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def test_bad_subset(education_df):
|
| 247 |
+
gp = education_df.groupby("country")
|
| 248 |
+
with pytest.raises(ValueError, match="subset"):
|
| 249 |
+
gp.value_counts(subset=["country"])
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def test_basic(education_df, request):
|
| 253 |
+
# gh43564
|
| 254 |
+
if Version(np.__version__) >= Version("1.25"):
|
| 255 |
+
request.applymarker(
|
| 256 |
+
pytest.mark.xfail(
|
| 257 |
+
reason=(
|
| 258 |
+
"pandas default unstable sorting of duplicates"
|
| 259 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 260 |
+
),
|
| 261 |
+
strict=False,
|
| 262 |
+
)
|
| 263 |
+
)
|
| 264 |
+
result = education_df.groupby("country")[["gender", "education"]].value_counts(
|
| 265 |
+
normalize=True
|
| 266 |
+
)
|
| 267 |
+
expected = Series(
|
| 268 |
+
data=[0.5, 0.25, 0.25, 0.5, 0.5],
|
| 269 |
+
index=MultiIndex.from_tuples(
|
| 270 |
+
[
|
| 271 |
+
("FR", "male", "low"),
|
| 272 |
+
("FR", "female", "high"),
|
| 273 |
+
("FR", "male", "medium"),
|
| 274 |
+
("US", "female", "high"),
|
| 275 |
+
("US", "male", "low"),
|
| 276 |
+
],
|
| 277 |
+
names=["country", "gender", "education"],
|
| 278 |
+
),
|
| 279 |
+
name="proportion",
|
| 280 |
+
)
|
| 281 |
+
tm.assert_series_equal(result, expected)
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _frame_value_counts(df, keys, normalize, sort, ascending):
|
| 285 |
+
return df[keys].value_counts(normalize=normalize, sort=sort, ascending=ascending)
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
@pytest.mark.parametrize("groupby", ["column", "array", "function"])
|
| 289 |
+
@pytest.mark.parametrize("normalize, name", [(True, "proportion"), (False, "count")])
|
| 290 |
+
@pytest.mark.parametrize(
|
| 291 |
+
"sort, ascending",
|
| 292 |
+
[
|
| 293 |
+
(False, None),
|
| 294 |
+
(True, True),
|
| 295 |
+
(True, False),
|
| 296 |
+
],
|
| 297 |
+
)
|
| 298 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 299 |
+
@pytest.mark.parametrize("frame", [True, False])
|
| 300 |
+
def test_against_frame_and_seriesgroupby(
|
| 301 |
+
education_df, groupby, normalize, name, sort, ascending, as_index, frame, request
|
| 302 |
+
):
|
| 303 |
+
# test all parameters:
|
| 304 |
+
# - Use column, array or function as by= parameter
|
| 305 |
+
# - Whether or not to normalize
|
| 306 |
+
# - Whether or not to sort and how
|
| 307 |
+
# - Whether or not to use the groupby as an index
|
| 308 |
+
# - 3-way compare against:
|
| 309 |
+
# - apply with :meth:`~DataFrame.value_counts`
|
| 310 |
+
# - `~SeriesGroupBy.value_counts`
|
| 311 |
+
if Version(np.__version__) >= Version("1.25") and frame and sort and normalize:
|
| 312 |
+
request.applymarker(
|
| 313 |
+
pytest.mark.xfail(
|
| 314 |
+
reason=(
|
| 315 |
+
"pandas default unstable sorting of duplicates"
|
| 316 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 317 |
+
),
|
| 318 |
+
strict=False,
|
| 319 |
+
)
|
| 320 |
+
)
|
| 321 |
+
by = {
|
| 322 |
+
"column": "country",
|
| 323 |
+
"array": education_df["country"].values,
|
| 324 |
+
"function": lambda x: education_df["country"][x] == "US",
|
| 325 |
+
}[groupby]
|
| 326 |
+
|
| 327 |
+
gp = education_df.groupby(by=by, as_index=as_index)
|
| 328 |
+
result = gp[["gender", "education"]].value_counts(
|
| 329 |
+
normalize=normalize, sort=sort, ascending=ascending
|
| 330 |
+
)
|
| 331 |
+
if frame:
|
| 332 |
+
# compare against apply with DataFrame value_counts
|
| 333 |
+
warn = DeprecationWarning if groupby == "column" else None
|
| 334 |
+
msg = "DataFrameGroupBy.apply operated on the grouping columns"
|
| 335 |
+
with tm.assert_produces_warning(warn, match=msg):
|
| 336 |
+
expected = gp.apply(
|
| 337 |
+
_frame_value_counts, ["gender", "education"], normalize, sort, ascending
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
if as_index:
|
| 341 |
+
tm.assert_series_equal(result, expected)
|
| 342 |
+
else:
|
| 343 |
+
name = "proportion" if normalize else "count"
|
| 344 |
+
expected = expected.reset_index().rename({0: name}, axis=1)
|
| 345 |
+
if groupby == "column":
|
| 346 |
+
expected = expected.rename({"level_0": "country"}, axis=1)
|
| 347 |
+
expected["country"] = np.where(expected["country"], "US", "FR")
|
| 348 |
+
elif groupby == "function":
|
| 349 |
+
expected["level_0"] = expected["level_0"] == 1
|
| 350 |
+
else:
|
| 351 |
+
expected["level_0"] = np.where(expected["level_0"], "US", "FR")
|
| 352 |
+
tm.assert_frame_equal(result, expected)
|
| 353 |
+
else:
|
| 354 |
+
# compare against SeriesGroupBy value_counts
|
| 355 |
+
education_df["both"] = education_df["gender"] + "-" + education_df["education"]
|
| 356 |
+
expected = gp["both"].value_counts(
|
| 357 |
+
normalize=normalize, sort=sort, ascending=ascending
|
| 358 |
+
)
|
| 359 |
+
expected.name = name
|
| 360 |
+
if as_index:
|
| 361 |
+
index_frame = expected.index.to_frame(index=False)
|
| 362 |
+
index_frame["gender"] = index_frame["both"].str.split("-").str.get(0)
|
| 363 |
+
index_frame["education"] = index_frame["both"].str.split("-").str.get(1)
|
| 364 |
+
del index_frame["both"]
|
| 365 |
+
index_frame = index_frame.rename({0: None}, axis=1)
|
| 366 |
+
expected.index = MultiIndex.from_frame(index_frame)
|
| 367 |
+
tm.assert_series_equal(result, expected)
|
| 368 |
+
else:
|
| 369 |
+
expected.insert(1, "gender", expected["both"].str.split("-").str.get(0))
|
| 370 |
+
expected.insert(2, "education", expected["both"].str.split("-").str.get(1))
|
| 371 |
+
del expected["both"]
|
| 372 |
+
tm.assert_frame_equal(result, expected)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
@pytest.mark.parametrize(
|
| 376 |
+
"dtype",
|
| 377 |
+
[
|
| 378 |
+
object,
|
| 379 |
+
pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")),
|
| 380 |
+
pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
|
| 381 |
+
],
|
| 382 |
+
)
|
| 383 |
+
@pytest.mark.parametrize("normalize", [True, False])
|
| 384 |
+
@pytest.mark.parametrize(
|
| 385 |
+
"sort, ascending, expected_rows, expected_count, expected_group_size",
|
| 386 |
+
[
|
| 387 |
+
(False, None, [0, 1, 2, 3, 4], [1, 1, 1, 2, 1], [1, 3, 1, 3, 1]),
|
| 388 |
+
(True, False, [3, 0, 1, 2, 4], [2, 1, 1, 1, 1], [3, 1, 3, 1, 1]),
|
| 389 |
+
(True, True, [0, 1, 2, 4, 3], [1, 1, 1, 1, 2], [1, 3, 1, 1, 3]),
|
| 390 |
+
],
|
| 391 |
+
)
|
| 392 |
+
def test_compound(
|
| 393 |
+
education_df,
|
| 394 |
+
normalize,
|
| 395 |
+
sort,
|
| 396 |
+
ascending,
|
| 397 |
+
expected_rows,
|
| 398 |
+
expected_count,
|
| 399 |
+
expected_group_size,
|
| 400 |
+
dtype,
|
| 401 |
+
):
|
| 402 |
+
education_df = education_df.astype(dtype)
|
| 403 |
+
education_df.columns = education_df.columns.astype(dtype)
|
| 404 |
+
# Multiple groupby keys and as_index=False
|
| 405 |
+
gp = education_df.groupby(["country", "gender"], as_index=False, sort=False)
|
| 406 |
+
result = gp["education"].value_counts(
|
| 407 |
+
normalize=normalize, sort=sort, ascending=ascending
|
| 408 |
+
)
|
| 409 |
+
expected = DataFrame()
|
| 410 |
+
for column in ["country", "gender", "education"]:
|
| 411 |
+
expected[column] = [education_df[column][row] for row in expected_rows]
|
| 412 |
+
expected = expected.astype(dtype)
|
| 413 |
+
expected.columns = expected.columns.astype(dtype)
|
| 414 |
+
if normalize:
|
| 415 |
+
expected["proportion"] = expected_count
|
| 416 |
+
expected["proportion"] /= expected_group_size
|
| 417 |
+
if dtype == "string[pyarrow]":
|
| 418 |
+
expected["proportion"] = expected["proportion"].convert_dtypes()
|
| 419 |
+
else:
|
| 420 |
+
expected["count"] = expected_count
|
| 421 |
+
if dtype == "string[pyarrow]":
|
| 422 |
+
expected["count"] = expected["count"].convert_dtypes()
|
| 423 |
+
tm.assert_frame_equal(result, expected)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
@pytest.fixture
|
| 427 |
+
def animals_df():
|
| 428 |
+
return DataFrame(
|
| 429 |
+
{"key": [1, 1, 1, 1], "num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
|
| 430 |
+
index=["falcon", "dog", "cat", "ant"],
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@pytest.mark.parametrize(
|
| 435 |
+
"sort, ascending, normalize, name, expected_data, expected_index",
|
| 436 |
+
[
|
| 437 |
+
(False, None, False, "count", [1, 2, 1], [(1, 1, 1), (2, 4, 6), (2, 0, 0)]),
|
| 438 |
+
(True, True, False, "count", [1, 1, 2], [(1, 1, 1), (2, 6, 4), (2, 0, 0)]),
|
| 439 |
+
(True, False, False, "count", [2, 1, 1], [(1, 1, 1), (4, 2, 6), (0, 2, 0)]),
|
| 440 |
+
(
|
| 441 |
+
True,
|
| 442 |
+
False,
|
| 443 |
+
True,
|
| 444 |
+
"proportion",
|
| 445 |
+
[0.5, 0.25, 0.25],
|
| 446 |
+
[(1, 1, 1), (4, 2, 6), (0, 2, 0)],
|
| 447 |
+
),
|
| 448 |
+
],
|
| 449 |
+
)
|
| 450 |
+
def test_data_frame_value_counts(
|
| 451 |
+
animals_df, sort, ascending, normalize, name, expected_data, expected_index
|
| 452 |
+
):
|
| 453 |
+
# 3-way compare with :meth:`~DataFrame.value_counts`
|
| 454 |
+
# Tests from frame/methods/test_value_counts.py
|
| 455 |
+
result_frame = animals_df.value_counts(
|
| 456 |
+
sort=sort, ascending=ascending, normalize=normalize
|
| 457 |
+
)
|
| 458 |
+
expected = Series(
|
| 459 |
+
data=expected_data,
|
| 460 |
+
index=MultiIndex.from_arrays(
|
| 461 |
+
expected_index, names=["key", "num_legs", "num_wings"]
|
| 462 |
+
),
|
| 463 |
+
name=name,
|
| 464 |
+
)
|
| 465 |
+
tm.assert_series_equal(result_frame, expected)
|
| 466 |
+
|
| 467 |
+
result_frame_groupby = animals_df.groupby("key").value_counts(
|
| 468 |
+
sort=sort, ascending=ascending, normalize=normalize
|
| 469 |
+
)
|
| 470 |
+
|
| 471 |
+
tm.assert_series_equal(result_frame_groupby, expected)
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
@pytest.fixture
|
| 475 |
+
def nulls_df():
|
| 476 |
+
n = np.nan
|
| 477 |
+
return DataFrame(
|
| 478 |
+
{
|
| 479 |
+
"A": [1, 1, n, 4, n, 6, 6, 6, 6],
|
| 480 |
+
"B": [1, 1, 3, n, n, 6, 6, 6, 6],
|
| 481 |
+
"C": [1, 2, 3, 4, 5, 6, n, 8, n],
|
| 482 |
+
"D": [1, 2, 3, 4, 5, 6, 7, n, n],
|
| 483 |
+
}
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
@pytest.mark.parametrize(
|
| 488 |
+
"group_dropna, count_dropna, expected_rows, expected_values",
|
| 489 |
+
[
|
| 490 |
+
(
|
| 491 |
+
False,
|
| 492 |
+
False,
|
| 493 |
+
[0, 1, 3, 5, 7, 6, 8, 2, 4],
|
| 494 |
+
[0.5, 0.5, 1.0, 0.25, 0.25, 0.25, 0.25, 1.0, 1.0],
|
| 495 |
+
),
|
| 496 |
+
(False, True, [0, 1, 3, 5, 2, 4], [0.5, 0.5, 1.0, 1.0, 1.0, 1.0]),
|
| 497 |
+
(True, False, [0, 1, 5, 7, 6, 8], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]),
|
| 498 |
+
(True, True, [0, 1, 5], [0.5, 0.5, 1.0]),
|
| 499 |
+
],
|
| 500 |
+
)
|
| 501 |
+
def test_dropna_combinations(
|
| 502 |
+
nulls_df, group_dropna, count_dropna, expected_rows, expected_values, request
|
| 503 |
+
):
|
| 504 |
+
if Version(np.__version__) >= Version("1.25") and not group_dropna:
|
| 505 |
+
request.applymarker(
|
| 506 |
+
pytest.mark.xfail(
|
| 507 |
+
reason=(
|
| 508 |
+
"pandas default unstable sorting of duplicates"
|
| 509 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 510 |
+
),
|
| 511 |
+
strict=False,
|
| 512 |
+
)
|
| 513 |
+
)
|
| 514 |
+
gp = nulls_df.groupby(["A", "B"], dropna=group_dropna)
|
| 515 |
+
result = gp.value_counts(normalize=True, sort=True, dropna=count_dropna)
|
| 516 |
+
columns = DataFrame()
|
| 517 |
+
for column in nulls_df.columns:
|
| 518 |
+
columns[column] = [nulls_df[column][row] for row in expected_rows]
|
| 519 |
+
index = MultiIndex.from_frame(columns)
|
| 520 |
+
expected = Series(data=expected_values, index=index, name="proportion")
|
| 521 |
+
tm.assert_series_equal(result, expected)
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
@pytest.fixture
|
| 525 |
+
def names_with_nulls_df(nulls_fixture):
|
| 526 |
+
return DataFrame(
|
| 527 |
+
{
|
| 528 |
+
"key": [1, 1, 1, 1],
|
| 529 |
+
"first_name": ["John", "Anne", "John", "Beth"],
|
| 530 |
+
"middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
|
| 531 |
+
},
|
| 532 |
+
)
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
@pytest.mark.parametrize(
|
| 536 |
+
"dropna, expected_data, expected_index",
|
| 537 |
+
[
|
| 538 |
+
(
|
| 539 |
+
True,
|
| 540 |
+
[1, 1],
|
| 541 |
+
MultiIndex.from_arrays(
|
| 542 |
+
[(1, 1), ("Beth", "John"), ("Louise", "Smith")],
|
| 543 |
+
names=["key", "first_name", "middle_name"],
|
| 544 |
+
),
|
| 545 |
+
),
|
| 546 |
+
(
|
| 547 |
+
False,
|
| 548 |
+
[1, 1, 1, 1],
|
| 549 |
+
MultiIndex(
|
| 550 |
+
levels=[
|
| 551 |
+
Index([1]),
|
| 552 |
+
Index(["Anne", "Beth", "John"]),
|
| 553 |
+
Index(["Louise", "Smith", np.nan]),
|
| 554 |
+
],
|
| 555 |
+
codes=[[0, 0, 0, 0], [0, 1, 2, 2], [2, 0, 1, 2]],
|
| 556 |
+
names=["key", "first_name", "middle_name"],
|
| 557 |
+
),
|
| 558 |
+
),
|
| 559 |
+
],
|
| 560 |
+
)
|
| 561 |
+
@pytest.mark.parametrize("normalize, name", [(False, "count"), (True, "proportion")])
|
| 562 |
+
def test_data_frame_value_counts_dropna(
|
| 563 |
+
names_with_nulls_df, dropna, normalize, name, expected_data, expected_index
|
| 564 |
+
):
|
| 565 |
+
# GH 41334
|
| 566 |
+
# 3-way compare with :meth:`~DataFrame.value_counts`
|
| 567 |
+
# Tests with nulls from frame/methods/test_value_counts.py
|
| 568 |
+
result_frame = names_with_nulls_df.value_counts(dropna=dropna, normalize=normalize)
|
| 569 |
+
expected = Series(
|
| 570 |
+
data=expected_data,
|
| 571 |
+
index=expected_index,
|
| 572 |
+
name=name,
|
| 573 |
+
)
|
| 574 |
+
if normalize:
|
| 575 |
+
expected /= float(len(expected_data))
|
| 576 |
+
|
| 577 |
+
tm.assert_series_equal(result_frame, expected)
|
| 578 |
+
|
| 579 |
+
result_frame_groupby = names_with_nulls_df.groupby("key").value_counts(
|
| 580 |
+
dropna=dropna, normalize=normalize
|
| 581 |
+
)
|
| 582 |
+
|
| 583 |
+
tm.assert_series_equal(result_frame_groupby, expected)
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
@pytest.mark.parametrize("as_index", [False, True])
|
| 587 |
+
@pytest.mark.parametrize("observed", [False, True])
|
| 588 |
+
@pytest.mark.parametrize(
|
| 589 |
+
"normalize, name, expected_data",
|
| 590 |
+
[
|
| 591 |
+
(
|
| 592 |
+
False,
|
| 593 |
+
"count",
|
| 594 |
+
np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64),
|
| 595 |
+
),
|
| 596 |
+
(
|
| 597 |
+
True,
|
| 598 |
+
"proportion",
|
| 599 |
+
np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
|
| 600 |
+
),
|
| 601 |
+
],
|
| 602 |
+
)
|
| 603 |
+
def test_categorical_single_grouper_with_only_observed_categories(
|
| 604 |
+
education_df, as_index, observed, normalize, name, expected_data, request
|
| 605 |
+
):
|
| 606 |
+
# Test single categorical grouper with only observed grouping categories
|
| 607 |
+
# when non-groupers are also categorical
|
| 608 |
+
if Version(np.__version__) >= Version("1.25"):
|
| 609 |
+
request.applymarker(
|
| 610 |
+
pytest.mark.xfail(
|
| 611 |
+
reason=(
|
| 612 |
+
"pandas default unstable sorting of duplicates"
|
| 613 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 614 |
+
),
|
| 615 |
+
strict=False,
|
| 616 |
+
)
|
| 617 |
+
)
|
| 618 |
+
|
| 619 |
+
gp = education_df.astype("category").groupby(
|
| 620 |
+
"country", as_index=as_index, observed=observed
|
| 621 |
+
)
|
| 622 |
+
result = gp.value_counts(normalize=normalize)
|
| 623 |
+
|
| 624 |
+
expected_index = MultiIndex.from_tuples(
|
| 625 |
+
[
|
| 626 |
+
("FR", "male", "low"),
|
| 627 |
+
("FR", "female", "high"),
|
| 628 |
+
("FR", "male", "medium"),
|
| 629 |
+
("FR", "female", "low"),
|
| 630 |
+
("FR", "female", "medium"),
|
| 631 |
+
("FR", "male", "high"),
|
| 632 |
+
("US", "female", "high"),
|
| 633 |
+
("US", "male", "low"),
|
| 634 |
+
("US", "female", "low"),
|
| 635 |
+
("US", "female", "medium"),
|
| 636 |
+
("US", "male", "high"),
|
| 637 |
+
("US", "male", "medium"),
|
| 638 |
+
],
|
| 639 |
+
names=["country", "gender", "education"],
|
| 640 |
+
)
|
| 641 |
+
|
| 642 |
+
expected_series = Series(
|
| 643 |
+
data=expected_data,
|
| 644 |
+
index=expected_index,
|
| 645 |
+
name=name,
|
| 646 |
+
)
|
| 647 |
+
for i in range(3):
|
| 648 |
+
expected_series.index = expected_series.index.set_levels(
|
| 649 |
+
CategoricalIndex(expected_series.index.levels[i]), level=i
|
| 650 |
+
)
|
| 651 |
+
|
| 652 |
+
if as_index:
|
| 653 |
+
tm.assert_series_equal(result, expected_series)
|
| 654 |
+
else:
|
| 655 |
+
expected = expected_series.reset_index(
|
| 656 |
+
name="proportion" if normalize else "count"
|
| 657 |
+
)
|
| 658 |
+
tm.assert_frame_equal(result, expected)
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def assert_categorical_single_grouper(
|
| 662 |
+
education_df, as_index, observed, expected_index, normalize, name, expected_data
|
| 663 |
+
):
|
| 664 |
+
# Test single categorical grouper when non-groupers are also categorical
|
| 665 |
+
education_df = education_df.copy().astype("category")
|
| 666 |
+
|
| 667 |
+
# Add non-observed grouping categories
|
| 668 |
+
education_df["country"] = education_df["country"].cat.add_categories(["ASIA"])
|
| 669 |
+
|
| 670 |
+
gp = education_df.groupby("country", as_index=as_index, observed=observed)
|
| 671 |
+
result = gp.value_counts(normalize=normalize)
|
| 672 |
+
|
| 673 |
+
expected_series = Series(
|
| 674 |
+
data=expected_data,
|
| 675 |
+
index=MultiIndex.from_tuples(
|
| 676 |
+
expected_index,
|
| 677 |
+
names=["country", "gender", "education"],
|
| 678 |
+
),
|
| 679 |
+
name=name,
|
| 680 |
+
)
|
| 681 |
+
for i in range(3):
|
| 682 |
+
index_level = CategoricalIndex(expected_series.index.levels[i])
|
| 683 |
+
if i == 0:
|
| 684 |
+
index_level = index_level.set_categories(
|
| 685 |
+
education_df["country"].cat.categories
|
| 686 |
+
)
|
| 687 |
+
expected_series.index = expected_series.index.set_levels(index_level, level=i)
|
| 688 |
+
|
| 689 |
+
if as_index:
|
| 690 |
+
tm.assert_series_equal(result, expected_series)
|
| 691 |
+
else:
|
| 692 |
+
expected = expected_series.reset_index(name=name)
|
| 693 |
+
tm.assert_frame_equal(result, expected)
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 697 |
+
@pytest.mark.parametrize(
|
| 698 |
+
"normalize, name, expected_data",
|
| 699 |
+
[
|
| 700 |
+
(
|
| 701 |
+
False,
|
| 702 |
+
"count",
|
| 703 |
+
np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64),
|
| 704 |
+
),
|
| 705 |
+
(
|
| 706 |
+
True,
|
| 707 |
+
"proportion",
|
| 708 |
+
np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
|
| 709 |
+
),
|
| 710 |
+
],
|
| 711 |
+
)
|
| 712 |
+
def test_categorical_single_grouper_observed_true(
|
| 713 |
+
education_df, as_index, normalize, name, expected_data, request
|
| 714 |
+
):
|
| 715 |
+
# GH#46357
|
| 716 |
+
|
| 717 |
+
if Version(np.__version__) >= Version("1.25"):
|
| 718 |
+
request.applymarker(
|
| 719 |
+
pytest.mark.xfail(
|
| 720 |
+
reason=(
|
| 721 |
+
"pandas default unstable sorting of duplicates"
|
| 722 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 723 |
+
),
|
| 724 |
+
strict=False,
|
| 725 |
+
)
|
| 726 |
+
)
|
| 727 |
+
|
| 728 |
+
expected_index = [
|
| 729 |
+
("FR", "male", "low"),
|
| 730 |
+
("FR", "female", "high"),
|
| 731 |
+
("FR", "male", "medium"),
|
| 732 |
+
("FR", "female", "low"),
|
| 733 |
+
("FR", "female", "medium"),
|
| 734 |
+
("FR", "male", "high"),
|
| 735 |
+
("US", "female", "high"),
|
| 736 |
+
("US", "male", "low"),
|
| 737 |
+
("US", "female", "low"),
|
| 738 |
+
("US", "female", "medium"),
|
| 739 |
+
("US", "male", "high"),
|
| 740 |
+
("US", "male", "medium"),
|
| 741 |
+
]
|
| 742 |
+
|
| 743 |
+
assert_categorical_single_grouper(
|
| 744 |
+
education_df=education_df,
|
| 745 |
+
as_index=as_index,
|
| 746 |
+
observed=True,
|
| 747 |
+
expected_index=expected_index,
|
| 748 |
+
normalize=normalize,
|
| 749 |
+
name=name,
|
| 750 |
+
expected_data=expected_data,
|
| 751 |
+
)
|
| 752 |
+
|
| 753 |
+
|
| 754 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 755 |
+
@pytest.mark.parametrize(
|
| 756 |
+
"normalize, name, expected_data",
|
| 757 |
+
[
|
| 758 |
+
(
|
| 759 |
+
False,
|
| 760 |
+
"count",
|
| 761 |
+
np.array(
|
| 762 |
+
[2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int64
|
| 763 |
+
),
|
| 764 |
+
),
|
| 765 |
+
(
|
| 766 |
+
True,
|
| 767 |
+
"proportion",
|
| 768 |
+
np.array(
|
| 769 |
+
[
|
| 770 |
+
0.5,
|
| 771 |
+
0.25,
|
| 772 |
+
0.25,
|
| 773 |
+
0.0,
|
| 774 |
+
0.0,
|
| 775 |
+
0.0,
|
| 776 |
+
0.5,
|
| 777 |
+
0.5,
|
| 778 |
+
0.0,
|
| 779 |
+
0.0,
|
| 780 |
+
0.0,
|
| 781 |
+
0.0,
|
| 782 |
+
0.0,
|
| 783 |
+
0.0,
|
| 784 |
+
0.0,
|
| 785 |
+
0.0,
|
| 786 |
+
0.0,
|
| 787 |
+
0.0,
|
| 788 |
+
]
|
| 789 |
+
),
|
| 790 |
+
),
|
| 791 |
+
],
|
| 792 |
+
)
|
| 793 |
+
def test_categorical_single_grouper_observed_false(
|
| 794 |
+
education_df, as_index, normalize, name, expected_data, request
|
| 795 |
+
):
|
| 796 |
+
# GH#46357
|
| 797 |
+
|
| 798 |
+
if Version(np.__version__) >= Version("1.25"):
|
| 799 |
+
request.applymarker(
|
| 800 |
+
pytest.mark.xfail(
|
| 801 |
+
reason=(
|
| 802 |
+
"pandas default unstable sorting of duplicates"
|
| 803 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 804 |
+
),
|
| 805 |
+
strict=False,
|
| 806 |
+
)
|
| 807 |
+
)
|
| 808 |
+
|
| 809 |
+
expected_index = [
|
| 810 |
+
("FR", "male", "low"),
|
| 811 |
+
("FR", "female", "high"),
|
| 812 |
+
("FR", "male", "medium"),
|
| 813 |
+
("FR", "female", "low"),
|
| 814 |
+
("FR", "female", "medium"),
|
| 815 |
+
("FR", "male", "high"),
|
| 816 |
+
("US", "female", "high"),
|
| 817 |
+
("US", "male", "low"),
|
| 818 |
+
("US", "female", "low"),
|
| 819 |
+
("US", "female", "medium"),
|
| 820 |
+
("US", "male", "high"),
|
| 821 |
+
("US", "male", "medium"),
|
| 822 |
+
("ASIA", "female", "high"),
|
| 823 |
+
("ASIA", "female", "low"),
|
| 824 |
+
("ASIA", "female", "medium"),
|
| 825 |
+
("ASIA", "male", "high"),
|
| 826 |
+
("ASIA", "male", "low"),
|
| 827 |
+
("ASIA", "male", "medium"),
|
| 828 |
+
]
|
| 829 |
+
|
| 830 |
+
assert_categorical_single_grouper(
|
| 831 |
+
education_df=education_df,
|
| 832 |
+
as_index=as_index,
|
| 833 |
+
observed=False,
|
| 834 |
+
expected_index=expected_index,
|
| 835 |
+
normalize=normalize,
|
| 836 |
+
name=name,
|
| 837 |
+
expected_data=expected_data,
|
| 838 |
+
)
|
| 839 |
+
|
| 840 |
+
|
| 841 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 842 |
+
@pytest.mark.parametrize(
|
| 843 |
+
"observed, expected_index",
|
| 844 |
+
[
|
| 845 |
+
(
|
| 846 |
+
False,
|
| 847 |
+
[
|
| 848 |
+
("FR", "high", "female"),
|
| 849 |
+
("FR", "high", "male"),
|
| 850 |
+
("FR", "low", "male"),
|
| 851 |
+
("FR", "low", "female"),
|
| 852 |
+
("FR", "medium", "male"),
|
| 853 |
+
("FR", "medium", "female"),
|
| 854 |
+
("US", "high", "female"),
|
| 855 |
+
("US", "high", "male"),
|
| 856 |
+
("US", "low", "male"),
|
| 857 |
+
("US", "low", "female"),
|
| 858 |
+
("US", "medium", "female"),
|
| 859 |
+
("US", "medium", "male"),
|
| 860 |
+
],
|
| 861 |
+
),
|
| 862 |
+
(
|
| 863 |
+
True,
|
| 864 |
+
[
|
| 865 |
+
("FR", "high", "female"),
|
| 866 |
+
("FR", "low", "male"),
|
| 867 |
+
("FR", "medium", "male"),
|
| 868 |
+
("US", "high", "female"),
|
| 869 |
+
("US", "low", "male"),
|
| 870 |
+
],
|
| 871 |
+
),
|
| 872 |
+
],
|
| 873 |
+
)
|
| 874 |
+
@pytest.mark.parametrize(
|
| 875 |
+
"normalize, name, expected_data",
|
| 876 |
+
[
|
| 877 |
+
(
|
| 878 |
+
False,
|
| 879 |
+
"count",
|
| 880 |
+
np.array([1, 0, 2, 0, 1, 0, 1, 0, 1, 0, 0, 0], dtype=np.int64),
|
| 881 |
+
),
|
| 882 |
+
(
|
| 883 |
+
True,
|
| 884 |
+
"proportion",
|
| 885 |
+
# NaN values corresponds to non-observed groups
|
| 886 |
+
np.array([1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]),
|
| 887 |
+
),
|
| 888 |
+
],
|
| 889 |
+
)
|
| 890 |
+
def test_categorical_multiple_groupers(
|
| 891 |
+
education_df, as_index, observed, expected_index, normalize, name, expected_data
|
| 892 |
+
):
|
| 893 |
+
# GH#46357
|
| 894 |
+
|
| 895 |
+
# Test multiple categorical groupers when non-groupers are non-categorical
|
| 896 |
+
education_df = education_df.copy()
|
| 897 |
+
education_df["country"] = education_df["country"].astype("category")
|
| 898 |
+
education_df["education"] = education_df["education"].astype("category")
|
| 899 |
+
|
| 900 |
+
gp = education_df.groupby(
|
| 901 |
+
["country", "education"], as_index=as_index, observed=observed
|
| 902 |
+
)
|
| 903 |
+
result = gp.value_counts(normalize=normalize)
|
| 904 |
+
|
| 905 |
+
expected_series = Series(
|
| 906 |
+
data=expected_data[expected_data > 0.0] if observed else expected_data,
|
| 907 |
+
index=MultiIndex.from_tuples(
|
| 908 |
+
expected_index,
|
| 909 |
+
names=["country", "education", "gender"],
|
| 910 |
+
),
|
| 911 |
+
name=name,
|
| 912 |
+
)
|
| 913 |
+
for i in range(2):
|
| 914 |
+
expected_series.index = expected_series.index.set_levels(
|
| 915 |
+
CategoricalIndex(expected_series.index.levels[i]), level=i
|
| 916 |
+
)
|
| 917 |
+
|
| 918 |
+
if as_index:
|
| 919 |
+
tm.assert_series_equal(result, expected_series)
|
| 920 |
+
else:
|
| 921 |
+
expected = expected_series.reset_index(
|
| 922 |
+
name="proportion" if normalize else "count"
|
| 923 |
+
)
|
| 924 |
+
tm.assert_frame_equal(result, expected)
|
| 925 |
+
|
| 926 |
+
|
| 927 |
+
@pytest.mark.parametrize("as_index", [False, True])
|
| 928 |
+
@pytest.mark.parametrize("observed", [False, True])
|
| 929 |
+
@pytest.mark.parametrize(
|
| 930 |
+
"normalize, name, expected_data",
|
| 931 |
+
[
|
| 932 |
+
(
|
| 933 |
+
False,
|
| 934 |
+
"count",
|
| 935 |
+
np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64),
|
| 936 |
+
),
|
| 937 |
+
(
|
| 938 |
+
True,
|
| 939 |
+
"proportion",
|
| 940 |
+
# NaN values corresponds to non-observed groups
|
| 941 |
+
np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
|
| 942 |
+
),
|
| 943 |
+
],
|
| 944 |
+
)
|
| 945 |
+
def test_categorical_non_groupers(
|
| 946 |
+
education_df, as_index, observed, normalize, name, expected_data, request
|
| 947 |
+
):
|
| 948 |
+
# GH#46357 Test non-observed categories are included in the result,
|
| 949 |
+
# regardless of `observed`
|
| 950 |
+
|
| 951 |
+
if Version(np.__version__) >= Version("1.25"):
|
| 952 |
+
request.applymarker(
|
| 953 |
+
pytest.mark.xfail(
|
| 954 |
+
reason=(
|
| 955 |
+
"pandas default unstable sorting of duplicates"
|
| 956 |
+
"issue with numpy>=1.25 with AVX instructions"
|
| 957 |
+
),
|
| 958 |
+
strict=False,
|
| 959 |
+
)
|
| 960 |
+
)
|
| 961 |
+
|
| 962 |
+
education_df = education_df.copy()
|
| 963 |
+
education_df["gender"] = education_df["gender"].astype("category")
|
| 964 |
+
education_df["education"] = education_df["education"].astype("category")
|
| 965 |
+
|
| 966 |
+
gp = education_df.groupby("country", as_index=as_index, observed=observed)
|
| 967 |
+
result = gp.value_counts(normalize=normalize)
|
| 968 |
+
|
| 969 |
+
expected_index = [
|
| 970 |
+
("FR", "male", "low"),
|
| 971 |
+
("FR", "female", "high"),
|
| 972 |
+
("FR", "male", "medium"),
|
| 973 |
+
("FR", "female", "low"),
|
| 974 |
+
("FR", "female", "medium"),
|
| 975 |
+
("FR", "male", "high"),
|
| 976 |
+
("US", "female", "high"),
|
| 977 |
+
("US", "male", "low"),
|
| 978 |
+
("US", "female", "low"),
|
| 979 |
+
("US", "female", "medium"),
|
| 980 |
+
("US", "male", "high"),
|
| 981 |
+
("US", "male", "medium"),
|
| 982 |
+
]
|
| 983 |
+
expected_series = Series(
|
| 984 |
+
data=expected_data,
|
| 985 |
+
index=MultiIndex.from_tuples(
|
| 986 |
+
expected_index,
|
| 987 |
+
names=["country", "gender", "education"],
|
| 988 |
+
),
|
| 989 |
+
name=name,
|
| 990 |
+
)
|
| 991 |
+
for i in range(1, 3):
|
| 992 |
+
expected_series.index = expected_series.index.set_levels(
|
| 993 |
+
CategoricalIndex(expected_series.index.levels[i]), level=i
|
| 994 |
+
)
|
| 995 |
+
|
| 996 |
+
if as_index:
|
| 997 |
+
tm.assert_series_equal(result, expected_series)
|
| 998 |
+
else:
|
| 999 |
+
expected = expected_series.reset_index(
|
| 1000 |
+
name="proportion" if normalize else "count"
|
| 1001 |
+
)
|
| 1002 |
+
tm.assert_frame_equal(result, expected)
|
| 1003 |
+
|
| 1004 |
+
|
| 1005 |
+
@pytest.mark.parametrize(
|
| 1006 |
+
"normalize, expected_label, expected_values",
|
| 1007 |
+
[
|
| 1008 |
+
(False, "count", [1, 1, 1]),
|
| 1009 |
+
(True, "proportion", [0.5, 0.5, 1.0]),
|
| 1010 |
+
],
|
| 1011 |
+
)
|
| 1012 |
+
def test_mixed_groupings(normalize, expected_label, expected_values):
|
| 1013 |
+
# Test multiple groupings
|
| 1014 |
+
df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
|
| 1015 |
+
gp = df.groupby([[4, 5, 4], "A", lambda i: 7 if i == 1 else 8], as_index=False)
|
| 1016 |
+
result = gp.value_counts(sort=True, normalize=normalize)
|
| 1017 |
+
expected = DataFrame(
|
| 1018 |
+
{
|
| 1019 |
+
"level_0": np.array([4, 4, 5], dtype=int),
|
| 1020 |
+
"A": [1, 1, 2],
|
| 1021 |
+
"level_2": [8, 8, 7],
|
| 1022 |
+
"B": [1, 3, 2],
|
| 1023 |
+
expected_label: expected_values,
|
| 1024 |
+
}
|
| 1025 |
+
)
|
| 1026 |
+
tm.assert_frame_equal(result, expected)
|
| 1027 |
+
|
| 1028 |
+
|
| 1029 |
+
@pytest.mark.parametrize(
|
| 1030 |
+
"test, columns, expected_names",
|
| 1031 |
+
[
|
| 1032 |
+
("repeat", list("abbde"), ["a", None, "d", "b", "b", "e"]),
|
| 1033 |
+
("level", list("abcd") + ["level_1"], ["a", None, "d", "b", "c", "level_1"]),
|
| 1034 |
+
],
|
| 1035 |
+
)
|
| 1036 |
+
@pytest.mark.parametrize("as_index", [False, True])
|
| 1037 |
+
def test_column_label_duplicates(test, columns, expected_names, as_index):
|
| 1038 |
+
# GH 44992
|
| 1039 |
+
# Test for duplicate input column labels and generated duplicate labels
|
| 1040 |
+
df = DataFrame([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]], columns=columns)
|
| 1041 |
+
expected_data = [(1, 0, 7, 3, 5, 9), (2, 1, 8, 4, 6, 10)]
|
| 1042 |
+
keys = ["a", np.array([0, 1], dtype=np.int64), "d"]
|
| 1043 |
+
result = df.groupby(keys, as_index=as_index).value_counts()
|
| 1044 |
+
if as_index:
|
| 1045 |
+
expected = Series(
|
| 1046 |
+
data=(1, 1),
|
| 1047 |
+
index=MultiIndex.from_tuples(
|
| 1048 |
+
expected_data,
|
| 1049 |
+
names=expected_names,
|
| 1050 |
+
),
|
| 1051 |
+
name="count",
|
| 1052 |
+
)
|
| 1053 |
+
tm.assert_series_equal(result, expected)
|
| 1054 |
+
else:
|
| 1055 |
+
expected_data = [list(row) + [1] for row in expected_data]
|
| 1056 |
+
expected_columns = list(expected_names)
|
| 1057 |
+
expected_columns[1] = "level_1"
|
| 1058 |
+
expected_columns.append("count")
|
| 1059 |
+
expected = DataFrame(expected_data, columns=expected_columns)
|
| 1060 |
+
tm.assert_frame_equal(result, expected)
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
@pytest.mark.parametrize(
|
| 1064 |
+
"normalize, expected_label",
|
| 1065 |
+
[
|
| 1066 |
+
(False, "count"),
|
| 1067 |
+
(True, "proportion"),
|
| 1068 |
+
],
|
| 1069 |
+
)
|
| 1070 |
+
def test_result_label_duplicates(normalize, expected_label):
|
| 1071 |
+
# Test for result column label duplicating an input column label
|
| 1072 |
+
gb = DataFrame([[1, 2, 3]], columns=["a", "b", expected_label]).groupby(
|
| 1073 |
+
"a", as_index=False
|
| 1074 |
+
)
|
| 1075 |
+
msg = f"Column label '{expected_label}' is duplicate of result column"
|
| 1076 |
+
with pytest.raises(ValueError, match=msg):
|
| 1077 |
+
gb.value_counts(normalize=normalize)
|
| 1078 |
+
|
| 1079 |
+
|
| 1080 |
+
def test_ambiguous_grouping():
|
| 1081 |
+
# Test that groupby is not confused by groupings length equal to row count
|
| 1082 |
+
df = DataFrame({"a": [1, 1]})
|
| 1083 |
+
gb = df.groupby(np.array([1, 1], dtype=np.int64))
|
| 1084 |
+
result = gb.value_counts()
|
| 1085 |
+
expected = Series(
|
| 1086 |
+
[2], index=MultiIndex.from_tuples([[1, 1]], names=[None, "a"]), name="count"
|
| 1087 |
+
)
|
| 1088 |
+
tm.assert_series_equal(result, expected)
|
| 1089 |
+
|
| 1090 |
+
|
| 1091 |
+
def test_subset_overlaps_gb_key_raises():
|
| 1092 |
+
# GH 46383
|
| 1093 |
+
df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])
|
| 1094 |
+
msg = "Keys {'c1'} in subset cannot be in the groupby column keys."
|
| 1095 |
+
with pytest.raises(ValueError, match=msg):
|
| 1096 |
+
df.groupby("c1").value_counts(subset=["c1"])
|
| 1097 |
+
|
| 1098 |
+
|
| 1099 |
+
def test_subset_doesnt_exist_in_frame():
|
| 1100 |
+
# GH 46383
|
| 1101 |
+
df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])
|
| 1102 |
+
msg = "Keys {'c3'} in subset do not exist in the DataFrame."
|
| 1103 |
+
with pytest.raises(ValueError, match=msg):
|
| 1104 |
+
df.groupby("c1").value_counts(subset=["c3"])
|
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
def test_subset():
|
| 1108 |
+
# GH 46383
|
| 1109 |
+
df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])
|
| 1110 |
+
result = df.groupby(level=0).value_counts(subset=["c2"])
|
| 1111 |
+
expected = Series(
|
| 1112 |
+
[1, 2],
|
| 1113 |
+
index=MultiIndex.from_arrays([[0, 1], ["x", "y"]], names=[None, "c2"]),
|
| 1114 |
+
name="count",
|
| 1115 |
+
)
|
| 1116 |
+
tm.assert_series_equal(result, expected)
|
| 1117 |
+
|
| 1118 |
+
|
| 1119 |
+
def test_subset_duplicate_columns():
|
| 1120 |
+
# GH 46383
|
| 1121 |
+
df = DataFrame(
|
| 1122 |
+
[["a", "x", "x"], ["b", "y", "y"], ["b", "y", "y"]],
|
| 1123 |
+
index=[0, 1, 1],
|
| 1124 |
+
columns=["c1", "c2", "c2"],
|
| 1125 |
+
)
|
| 1126 |
+
result = df.groupby(level=0).value_counts(subset=["c2"])
|
| 1127 |
+
expected = Series(
|
| 1128 |
+
[1, 2],
|
| 1129 |
+
index=MultiIndex.from_arrays(
|
| 1130 |
+
[[0, 1], ["x", "y"], ["x", "y"]], names=[None, "c2", "c2"]
|
| 1131 |
+
),
|
| 1132 |
+
name="count",
|
| 1133 |
+
)
|
| 1134 |
+
tm.assert_series_equal(result, expected)
|
| 1135 |
+
|
| 1136 |
+
|
| 1137 |
+
@pytest.mark.parametrize("utc", [True, False])
|
| 1138 |
+
def test_value_counts_time_grouper(utc, unit):
|
| 1139 |
+
# GH#50486
|
| 1140 |
+
df = DataFrame(
|
| 1141 |
+
{
|
| 1142 |
+
"Timestamp": [
|
| 1143 |
+
1565083561,
|
| 1144 |
+
1565083561 + 86400,
|
| 1145 |
+
1565083561 + 86500,
|
| 1146 |
+
1565083561 + 86400 * 2,
|
| 1147 |
+
1565083561 + 86400 * 3,
|
| 1148 |
+
1565083561 + 86500 * 3,
|
| 1149 |
+
1565083561 + 86400 * 4,
|
| 1150 |
+
],
|
| 1151 |
+
"Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],
|
| 1152 |
+
}
|
| 1153 |
+
).drop([3])
|
| 1154 |
+
|
| 1155 |
+
df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s").dt.as_unit(unit)
|
| 1156 |
+
gb = df.groupby(Grouper(freq="1D", key="Datetime"))
|
| 1157 |
+
result = gb.value_counts()
|
| 1158 |
+
dates = to_datetime(
|
| 1159 |
+
["2019-08-06", "2019-08-07", "2019-08-09", "2019-08-10"], utc=utc
|
| 1160 |
+
).as_unit(unit)
|
| 1161 |
+
timestamps = df["Timestamp"].unique()
|
| 1162 |
+
index = MultiIndex(
|
| 1163 |
+
levels=[dates, timestamps, ["apple", "banana", "orange", "pear"]],
|
| 1164 |
+
codes=[[0, 1, 1, 2, 2, 3], range(6), [0, 0, 1, 2, 2, 3]],
|
| 1165 |
+
names=["Datetime", "Timestamp", "Food"],
|
| 1166 |
+
)
|
| 1167 |
+
expected = Series(1, index=index, name="count")
|
| 1168 |
+
tm.assert_series_equal(result, expected)
|
| 1169 |
+
|
| 1170 |
+
|
| 1171 |
+
def test_value_counts_integer_columns():
|
| 1172 |
+
# GH#55627
|
| 1173 |
+
df = DataFrame({1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"]})
|
| 1174 |
+
gp = df.groupby([1, 2], as_index=False, sort=False)
|
| 1175 |
+
result = gp[3].value_counts()
|
| 1176 |
+
expected = DataFrame(
|
| 1177 |
+
{1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"], "count": 1}
|
| 1178 |
+
)
|
| 1179 |
+
tm.assert_frame_equal(result, expected)
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
@pytest.mark.parametrize("vc_sort", [True, False])
|
| 1183 |
+
@pytest.mark.parametrize("normalize", [True, False])
|
| 1184 |
+
def test_value_counts_sort(sort, vc_sort, normalize):
|
| 1185 |
+
# GH#55951
|
| 1186 |
+
df = DataFrame({"a": [2, 1, 1, 1], 0: [3, 4, 3, 3]})
|
| 1187 |
+
gb = df.groupby("a", sort=sort)
|
| 1188 |
+
result = gb.value_counts(sort=vc_sort, normalize=normalize)
|
| 1189 |
+
|
| 1190 |
+
if normalize:
|
| 1191 |
+
values = [2 / 3, 1 / 3, 1.0]
|
| 1192 |
+
else:
|
| 1193 |
+
values = [2, 1, 1]
|
| 1194 |
+
index = MultiIndex(
|
| 1195 |
+
levels=[[1, 2], [3, 4]], codes=[[0, 0, 1], [0, 1, 0]], names=["a", 0]
|
| 1196 |
+
)
|
| 1197 |
+
expected = Series(values, index=index, name="proportion" if normalize else "count")
|
| 1198 |
+
if sort and vc_sort:
|
| 1199 |
+
taker = [0, 1, 2]
|
| 1200 |
+
elif sort and not vc_sort:
|
| 1201 |
+
taker = [0, 1, 2]
|
| 1202 |
+
elif not sort and vc_sort:
|
| 1203 |
+
taker = [0, 2, 1]
|
| 1204 |
+
else:
|
| 1205 |
+
taker = [2, 1, 0]
|
| 1206 |
+
expected = expected.take(taker)
|
| 1207 |
+
|
| 1208 |
+
tm.assert_series_equal(result, expected)
|
| 1209 |
+
|
| 1210 |
+
|
| 1211 |
+
@pytest.mark.parametrize("vc_sort", [True, False])
|
| 1212 |
+
@pytest.mark.parametrize("normalize", [True, False])
|
| 1213 |
+
def test_value_counts_sort_categorical(sort, vc_sort, normalize):
|
| 1214 |
+
# GH#55951
|
| 1215 |
+
df = DataFrame({"a": [2, 1, 1, 1], 0: [3, 4, 3, 3]}, dtype="category")
|
| 1216 |
+
gb = df.groupby("a", sort=sort, observed=True)
|
| 1217 |
+
result = gb.value_counts(sort=vc_sort, normalize=normalize)
|
| 1218 |
+
|
| 1219 |
+
if normalize:
|
| 1220 |
+
values = [2 / 3, 1 / 3, 1.0, 0.0]
|
| 1221 |
+
else:
|
| 1222 |
+
values = [2, 1, 1, 0]
|
| 1223 |
+
name = "proportion" if normalize else "count"
|
| 1224 |
+
expected = DataFrame(
|
| 1225 |
+
{
|
| 1226 |
+
"a": Categorical([1, 1, 2, 2]),
|
| 1227 |
+
0: Categorical([3, 4, 3, 4]),
|
| 1228 |
+
name: values,
|
| 1229 |
+
}
|
| 1230 |
+
).set_index(["a", 0])[name]
|
| 1231 |
+
if sort and vc_sort:
|
| 1232 |
+
taker = [0, 1, 2, 3]
|
| 1233 |
+
elif sort and not vc_sort:
|
| 1234 |
+
taker = [0, 1, 2, 3]
|
| 1235 |
+
elif not sort and vc_sort:
|
| 1236 |
+
taker = [0, 2, 1, 3]
|
| 1237 |
+
else:
|
| 1238 |
+
taker = [2, 3, 0, 1]
|
| 1239 |
+
expected = expected.take(taker)
|
| 1240 |
+
|
| 1241 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/test_index_as_string.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import pandas._testing as tm
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@pytest.fixture(params=[["inner"], ["inner", "outer"]])
|
| 9 |
+
def frame(request):
|
| 10 |
+
levels = request.param
|
| 11 |
+
df = pd.DataFrame(
|
| 12 |
+
{
|
| 13 |
+
"outer": ["a", "a", "a", "b", "b", "b"],
|
| 14 |
+
"inner": [1, 2, 3, 1, 2, 3],
|
| 15 |
+
"A": np.arange(6),
|
| 16 |
+
"B": ["one", "one", "two", "two", "one", "one"],
|
| 17 |
+
}
|
| 18 |
+
)
|
| 19 |
+
if levels:
|
| 20 |
+
df = df.set_index(levels)
|
| 21 |
+
|
| 22 |
+
return df
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.fixture()
|
| 26 |
+
def series():
|
| 27 |
+
df = pd.DataFrame(
|
| 28 |
+
{
|
| 29 |
+
"outer": ["a", "a", "a", "b", "b", "b"],
|
| 30 |
+
"inner": [1, 2, 3, 1, 2, 3],
|
| 31 |
+
"A": np.arange(6),
|
| 32 |
+
"B": ["one", "one", "two", "two", "one", "one"],
|
| 33 |
+
}
|
| 34 |
+
)
|
| 35 |
+
s = df.set_index(["outer", "inner", "B"])["A"]
|
| 36 |
+
|
| 37 |
+
return s
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@pytest.mark.parametrize(
|
| 41 |
+
"key_strs,groupers",
|
| 42 |
+
[
|
| 43 |
+
("inner", pd.Grouper(level="inner")), # Index name
|
| 44 |
+
(["inner"], [pd.Grouper(level="inner")]), # List of index name
|
| 45 |
+
(["B", "inner"], ["B", pd.Grouper(level="inner")]), # Column and index
|
| 46 |
+
(["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column
|
| 47 |
+
],
|
| 48 |
+
)
|
| 49 |
+
def test_grouper_index_level_as_string(frame, key_strs, groupers):
|
| 50 |
+
if "B" not in key_strs or "outer" in frame.columns:
|
| 51 |
+
result = frame.groupby(key_strs).mean(numeric_only=True)
|
| 52 |
+
expected = frame.groupby(groupers).mean(numeric_only=True)
|
| 53 |
+
else:
|
| 54 |
+
result = frame.groupby(key_strs).mean()
|
| 55 |
+
expected = frame.groupby(groupers).mean()
|
| 56 |
+
tm.assert_frame_equal(result, expected)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@pytest.mark.parametrize(
|
| 60 |
+
"levels",
|
| 61 |
+
[
|
| 62 |
+
"inner",
|
| 63 |
+
"outer",
|
| 64 |
+
"B",
|
| 65 |
+
["inner"],
|
| 66 |
+
["outer"],
|
| 67 |
+
["B"],
|
| 68 |
+
["inner", "outer"],
|
| 69 |
+
["outer", "inner"],
|
| 70 |
+
["inner", "outer", "B"],
|
| 71 |
+
["B", "outer", "inner"],
|
| 72 |
+
],
|
| 73 |
+
)
|
| 74 |
+
def test_grouper_index_level_as_string_series(series, levels):
|
| 75 |
+
# Compute expected result
|
| 76 |
+
if isinstance(levels, list):
|
| 77 |
+
groupers = [pd.Grouper(level=lv) for lv in levels]
|
| 78 |
+
else:
|
| 79 |
+
groupers = pd.Grouper(level=levels)
|
| 80 |
+
|
| 81 |
+
expected = series.groupby(groupers).mean()
|
| 82 |
+
|
| 83 |
+
# Compute and check result
|
| 84 |
+
result = series.groupby(levels).mean()
|
| 85 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/groupby/test_raises.py
ADDED
|
@@ -0,0 +1,716 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Only tests that raise an error and have no better location should go here.
|
| 2 |
+
# Tests for specific groupby methods should go in their respective
|
| 3 |
+
# test file.
|
| 4 |
+
|
| 5 |
+
import datetime
|
| 6 |
+
import re
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
|
| 11 |
+
from pandas import (
|
| 12 |
+
Categorical,
|
| 13 |
+
DataFrame,
|
| 14 |
+
Grouper,
|
| 15 |
+
Series,
|
| 16 |
+
)
|
| 17 |
+
import pandas._testing as tm
|
| 18 |
+
from pandas.tests.groupby import get_groupby_method_args
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@pytest.fixture(
|
| 22 |
+
params=[
|
| 23 |
+
"a",
|
| 24 |
+
["a"],
|
| 25 |
+
["a", "b"],
|
| 26 |
+
Grouper(key="a"),
|
| 27 |
+
lambda x: x % 2,
|
| 28 |
+
[0, 0, 0, 1, 2, 2, 2, 3, 3],
|
| 29 |
+
np.array([0, 0, 0, 1, 2, 2, 2, 3, 3]),
|
| 30 |
+
dict(zip(range(9), [0, 0, 0, 1, 2, 2, 2, 3, 3])),
|
| 31 |
+
Series([1, 1, 1, 1, 1, 2, 2, 2, 2]),
|
| 32 |
+
[Series([1, 1, 1, 1, 1, 2, 2, 2, 2]), Series([3, 3, 4, 4, 4, 4, 4, 3, 3])],
|
| 33 |
+
]
|
| 34 |
+
)
|
| 35 |
+
def by(request):
|
| 36 |
+
return request.param
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@pytest.fixture(params=[True, False])
|
| 40 |
+
def groupby_series(request):
|
| 41 |
+
return request.param
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@pytest.fixture
|
| 45 |
+
def df_with_string_col():
|
| 46 |
+
df = DataFrame(
|
| 47 |
+
{
|
| 48 |
+
"a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
|
| 49 |
+
"b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
|
| 50 |
+
"c": range(9),
|
| 51 |
+
"d": list("xyzwtyuio"),
|
| 52 |
+
}
|
| 53 |
+
)
|
| 54 |
+
return df
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@pytest.fixture
|
| 58 |
+
def df_with_datetime_col():
|
| 59 |
+
df = DataFrame(
|
| 60 |
+
{
|
| 61 |
+
"a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
|
| 62 |
+
"b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
|
| 63 |
+
"c": range(9),
|
| 64 |
+
"d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
|
| 65 |
+
}
|
| 66 |
+
)
|
| 67 |
+
return df
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@pytest.fixture
|
| 71 |
+
def df_with_timedelta_col():
|
| 72 |
+
df = DataFrame(
|
| 73 |
+
{
|
| 74 |
+
"a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
|
| 75 |
+
"b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
|
| 76 |
+
"c": range(9),
|
| 77 |
+
"d": datetime.timedelta(days=1),
|
| 78 |
+
}
|
| 79 |
+
)
|
| 80 |
+
return df
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@pytest.fixture
|
| 84 |
+
def df_with_cat_col():
|
| 85 |
+
df = DataFrame(
|
| 86 |
+
{
|
| 87 |
+
"a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
|
| 88 |
+
"b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
|
| 89 |
+
"c": range(9),
|
| 90 |
+
"d": Categorical(
|
| 91 |
+
["a", "a", "a", "a", "b", "b", "b", "b", "c"],
|
| 92 |
+
categories=["a", "b", "c", "d"],
|
| 93 |
+
ordered=True,
|
| 94 |
+
),
|
| 95 |
+
}
|
| 96 |
+
)
|
| 97 |
+
return df
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=""):
|
| 101 |
+
warn_klass = None if warn_msg == "" else FutureWarning
|
| 102 |
+
with tm.assert_produces_warning(warn_klass, match=warn_msg):
|
| 103 |
+
if klass is None:
|
| 104 |
+
if how == "method":
|
| 105 |
+
getattr(gb, groupby_func)(*args)
|
| 106 |
+
elif how == "agg":
|
| 107 |
+
gb.agg(groupby_func, *args)
|
| 108 |
+
else:
|
| 109 |
+
gb.transform(groupby_func, *args)
|
| 110 |
+
else:
|
| 111 |
+
with pytest.raises(klass, match=msg):
|
| 112 |
+
if how == "method":
|
| 113 |
+
getattr(gb, groupby_func)(*args)
|
| 114 |
+
elif how == "agg":
|
| 115 |
+
gb.agg(groupby_func, *args)
|
| 116 |
+
else:
|
| 117 |
+
gb.transform(groupby_func, *args)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@pytest.mark.parametrize("how", ["method", "agg", "transform"])
|
| 121 |
+
def test_groupby_raises_string(
|
| 122 |
+
how, by, groupby_series, groupby_func, df_with_string_col
|
| 123 |
+
):
|
| 124 |
+
df = df_with_string_col
|
| 125 |
+
args = get_groupby_method_args(groupby_func, df)
|
| 126 |
+
gb = df.groupby(by=by)
|
| 127 |
+
|
| 128 |
+
if groupby_series:
|
| 129 |
+
gb = gb["d"]
|
| 130 |
+
|
| 131 |
+
if groupby_func == "corrwith":
|
| 132 |
+
assert not hasattr(gb, "corrwith")
|
| 133 |
+
return
|
| 134 |
+
|
| 135 |
+
klass, msg = {
|
| 136 |
+
"all": (None, ""),
|
| 137 |
+
"any": (None, ""),
|
| 138 |
+
"bfill": (None, ""),
|
| 139 |
+
"corrwith": (TypeError, "Could not convert"),
|
| 140 |
+
"count": (None, ""),
|
| 141 |
+
"cumcount": (None, ""),
|
| 142 |
+
"cummax": (
|
| 143 |
+
(NotImplementedError, TypeError),
|
| 144 |
+
"(function|cummax) is not (implemented|supported) for (this|object) dtype",
|
| 145 |
+
),
|
| 146 |
+
"cummin": (
|
| 147 |
+
(NotImplementedError, TypeError),
|
| 148 |
+
"(function|cummin) is not (implemented|supported) for (this|object) dtype",
|
| 149 |
+
),
|
| 150 |
+
"cumprod": (
|
| 151 |
+
(NotImplementedError, TypeError),
|
| 152 |
+
"(function|cumprod) is not (implemented|supported) for (this|object) dtype",
|
| 153 |
+
),
|
| 154 |
+
"cumsum": (
|
| 155 |
+
(NotImplementedError, TypeError),
|
| 156 |
+
"(function|cumsum) is not (implemented|supported) for (this|object) dtype",
|
| 157 |
+
),
|
| 158 |
+
"diff": (TypeError, "unsupported operand type"),
|
| 159 |
+
"ffill": (None, ""),
|
| 160 |
+
"fillna": (None, ""),
|
| 161 |
+
"first": (None, ""),
|
| 162 |
+
"idxmax": (None, ""),
|
| 163 |
+
"idxmin": (None, ""),
|
| 164 |
+
"last": (None, ""),
|
| 165 |
+
"max": (None, ""),
|
| 166 |
+
"mean": (
|
| 167 |
+
TypeError,
|
| 168 |
+
re.escape("agg function failed [how->mean,dtype->object]"),
|
| 169 |
+
),
|
| 170 |
+
"median": (
|
| 171 |
+
TypeError,
|
| 172 |
+
re.escape("agg function failed [how->median,dtype->object]"),
|
| 173 |
+
),
|
| 174 |
+
"min": (None, ""),
|
| 175 |
+
"ngroup": (None, ""),
|
| 176 |
+
"nunique": (None, ""),
|
| 177 |
+
"pct_change": (TypeError, "unsupported operand type"),
|
| 178 |
+
"prod": (
|
| 179 |
+
TypeError,
|
| 180 |
+
re.escape("agg function failed [how->prod,dtype->object]"),
|
| 181 |
+
),
|
| 182 |
+
"quantile": (TypeError, "cannot be performed against 'object' dtypes!"),
|
| 183 |
+
"rank": (None, ""),
|
| 184 |
+
"sem": (ValueError, "could not convert string to float"),
|
| 185 |
+
"shift": (None, ""),
|
| 186 |
+
"size": (None, ""),
|
| 187 |
+
"skew": (ValueError, "could not convert string to float"),
|
| 188 |
+
"std": (ValueError, "could not convert string to float"),
|
| 189 |
+
"sum": (None, ""),
|
| 190 |
+
"var": (
|
| 191 |
+
TypeError,
|
| 192 |
+
re.escape("agg function failed [how->var,dtype->"),
|
| 193 |
+
),
|
| 194 |
+
}[groupby_func]
|
| 195 |
+
|
| 196 |
+
if groupby_func == "fillna":
|
| 197 |
+
kind = "Series" if groupby_series else "DataFrame"
|
| 198 |
+
warn_msg = f"{kind}GroupBy.fillna is deprecated"
|
| 199 |
+
else:
|
| 200 |
+
warn_msg = ""
|
| 201 |
+
_call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
@pytest.mark.parametrize("how", ["agg", "transform"])
|
| 205 |
+
def test_groupby_raises_string_udf(how, by, groupby_series, df_with_string_col):
|
| 206 |
+
df = df_with_string_col
|
| 207 |
+
gb = df.groupby(by=by)
|
| 208 |
+
|
| 209 |
+
if groupby_series:
|
| 210 |
+
gb = gb["d"]
|
| 211 |
+
|
| 212 |
+
def func(x):
|
| 213 |
+
raise TypeError("Test error message")
|
| 214 |
+
|
| 215 |
+
with pytest.raises(TypeError, match="Test error message"):
|
| 216 |
+
getattr(gb, how)(func)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
@pytest.mark.parametrize("how", ["agg", "transform"])
|
| 220 |
+
@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
|
| 221 |
+
def test_groupby_raises_string_np(
|
| 222 |
+
how, by, groupby_series, groupby_func_np, df_with_string_col
|
| 223 |
+
):
|
| 224 |
+
# GH#50749
|
| 225 |
+
df = df_with_string_col
|
| 226 |
+
gb = df.groupby(by=by)
|
| 227 |
+
|
| 228 |
+
if groupby_series:
|
| 229 |
+
gb = gb["d"]
|
| 230 |
+
|
| 231 |
+
klass, msg = {
|
| 232 |
+
np.sum: (None, ""),
|
| 233 |
+
np.mean: (
|
| 234 |
+
TypeError,
|
| 235 |
+
re.escape("agg function failed [how->mean,dtype->object]"),
|
| 236 |
+
),
|
| 237 |
+
}[groupby_func_np]
|
| 238 |
+
|
| 239 |
+
if groupby_series:
|
| 240 |
+
warn_msg = "using SeriesGroupBy.[sum|mean]"
|
| 241 |
+
else:
|
| 242 |
+
warn_msg = "using DataFrameGroupBy.[sum|mean]"
|
| 243 |
+
_call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
@pytest.mark.parametrize("how", ["method", "agg", "transform"])
|
| 247 |
+
def test_groupby_raises_datetime(
|
| 248 |
+
how, by, groupby_series, groupby_func, df_with_datetime_col
|
| 249 |
+
):
|
| 250 |
+
df = df_with_datetime_col
|
| 251 |
+
args = get_groupby_method_args(groupby_func, df)
|
| 252 |
+
gb = df.groupby(by=by)
|
| 253 |
+
|
| 254 |
+
if groupby_series:
|
| 255 |
+
gb = gb["d"]
|
| 256 |
+
|
| 257 |
+
if groupby_func == "corrwith":
|
| 258 |
+
assert not hasattr(gb, "corrwith")
|
| 259 |
+
return
|
| 260 |
+
|
| 261 |
+
klass, msg = {
|
| 262 |
+
"all": (None, ""),
|
| 263 |
+
"any": (None, ""),
|
| 264 |
+
"bfill": (None, ""),
|
| 265 |
+
"corrwith": (TypeError, "cannot perform __mul__ with this index type"),
|
| 266 |
+
"count": (None, ""),
|
| 267 |
+
"cumcount": (None, ""),
|
| 268 |
+
"cummax": (None, ""),
|
| 269 |
+
"cummin": (None, ""),
|
| 270 |
+
"cumprod": (TypeError, "datetime64 type does not support cumprod operations"),
|
| 271 |
+
"cumsum": (TypeError, "datetime64 type does not support cumsum operations"),
|
| 272 |
+
"diff": (None, ""),
|
| 273 |
+
"ffill": (None, ""),
|
| 274 |
+
"fillna": (None, ""),
|
| 275 |
+
"first": (None, ""),
|
| 276 |
+
"idxmax": (None, ""),
|
| 277 |
+
"idxmin": (None, ""),
|
| 278 |
+
"last": (None, ""),
|
| 279 |
+
"max": (None, ""),
|
| 280 |
+
"mean": (None, ""),
|
| 281 |
+
"median": (None, ""),
|
| 282 |
+
"min": (None, ""),
|
| 283 |
+
"ngroup": (None, ""),
|
| 284 |
+
"nunique": (None, ""),
|
| 285 |
+
"pct_change": (TypeError, "cannot perform __truediv__ with this index type"),
|
| 286 |
+
"prod": (TypeError, "datetime64 type does not support prod"),
|
| 287 |
+
"quantile": (None, ""),
|
| 288 |
+
"rank": (None, ""),
|
| 289 |
+
"sem": (None, ""),
|
| 290 |
+
"shift": (None, ""),
|
| 291 |
+
"size": (None, ""),
|
| 292 |
+
"skew": (
|
| 293 |
+
TypeError,
|
| 294 |
+
"|".join(
|
| 295 |
+
[
|
| 296 |
+
r"dtype datetime64\[ns\] does not support reduction",
|
| 297 |
+
"datetime64 type does not support skew operations",
|
| 298 |
+
]
|
| 299 |
+
),
|
| 300 |
+
),
|
| 301 |
+
"std": (None, ""),
|
| 302 |
+
"sum": (TypeError, "datetime64 type does not support sum operations"),
|
| 303 |
+
"var": (TypeError, "datetime64 type does not support var operations"),
|
| 304 |
+
}[groupby_func]
|
| 305 |
+
|
| 306 |
+
if groupby_func in ["any", "all"]:
|
| 307 |
+
warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated"
|
| 308 |
+
elif groupby_func == "fillna":
|
| 309 |
+
kind = "Series" if groupby_series else "DataFrame"
|
| 310 |
+
warn_msg = f"{kind}GroupBy.fillna is deprecated"
|
| 311 |
+
else:
|
| 312 |
+
warn_msg = ""
|
| 313 |
+
_call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=warn_msg)
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
@pytest.mark.parametrize("how", ["agg", "transform"])
|
| 317 |
+
def test_groupby_raises_datetime_udf(how, by, groupby_series, df_with_datetime_col):
|
| 318 |
+
df = df_with_datetime_col
|
| 319 |
+
gb = df.groupby(by=by)
|
| 320 |
+
|
| 321 |
+
if groupby_series:
|
| 322 |
+
gb = gb["d"]
|
| 323 |
+
|
| 324 |
+
def func(x):
|
| 325 |
+
raise TypeError("Test error message")
|
| 326 |
+
|
| 327 |
+
with pytest.raises(TypeError, match="Test error message"):
|
| 328 |
+
getattr(gb, how)(func)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@pytest.mark.parametrize("how", ["agg", "transform"])
|
| 332 |
+
@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
|
| 333 |
+
def test_groupby_raises_datetime_np(
|
| 334 |
+
how, by, groupby_series, groupby_func_np, df_with_datetime_col
|
| 335 |
+
):
|
| 336 |
+
# GH#50749
|
| 337 |
+
df = df_with_datetime_col
|
| 338 |
+
gb = df.groupby(by=by)
|
| 339 |
+
|
| 340 |
+
if groupby_series:
|
| 341 |
+
gb = gb["d"]
|
| 342 |
+
|
| 343 |
+
klass, msg = {
|
| 344 |
+
np.sum: (TypeError, "datetime64 type does not support sum operations"),
|
| 345 |
+
np.mean: (None, ""),
|
| 346 |
+
}[groupby_func_np]
|
| 347 |
+
|
| 348 |
+
if groupby_series:
|
| 349 |
+
warn_msg = "using SeriesGroupBy.[sum|mean]"
|
| 350 |
+
else:
|
| 351 |
+
warn_msg = "using DataFrameGroupBy.[sum|mean]"
|
| 352 |
+
_call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
@pytest.mark.parametrize("func", ["prod", "cumprod", "skew", "var"])
|
| 356 |
+
def test_groupby_raises_timedelta(func, df_with_timedelta_col):
|
| 357 |
+
df = df_with_timedelta_col
|
| 358 |
+
gb = df.groupby(by="a")
|
| 359 |
+
|
| 360 |
+
_call_and_check(
|
| 361 |
+
TypeError,
|
| 362 |
+
"timedelta64 type does not support .* operations",
|
| 363 |
+
"method",
|
| 364 |
+
gb,
|
| 365 |
+
func,
|
| 366 |
+
[],
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
@pytest.mark.parametrize("how", ["method", "agg", "transform"])
|
| 371 |
+
def test_groupby_raises_category(
|
| 372 |
+
how, by, groupby_series, groupby_func, using_copy_on_write, df_with_cat_col
|
| 373 |
+
):
|
| 374 |
+
# GH#50749
|
| 375 |
+
df = df_with_cat_col
|
| 376 |
+
args = get_groupby_method_args(groupby_func, df)
|
| 377 |
+
gb = df.groupby(by=by)
|
| 378 |
+
|
| 379 |
+
if groupby_series:
|
| 380 |
+
gb = gb["d"]
|
| 381 |
+
|
| 382 |
+
if groupby_func == "corrwith":
|
| 383 |
+
assert not hasattr(gb, "corrwith")
|
| 384 |
+
return
|
| 385 |
+
|
| 386 |
+
klass, msg = {
|
| 387 |
+
"all": (None, ""),
|
| 388 |
+
"any": (None, ""),
|
| 389 |
+
"bfill": (None, ""),
|
| 390 |
+
"corrwith": (
|
| 391 |
+
TypeError,
|
| 392 |
+
r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
|
| 393 |
+
),
|
| 394 |
+
"count": (None, ""),
|
| 395 |
+
"cumcount": (None, ""),
|
| 396 |
+
"cummax": (
|
| 397 |
+
(NotImplementedError, TypeError),
|
| 398 |
+
"(category type does not support cummax operations|"
|
| 399 |
+
"category dtype not supported|"
|
| 400 |
+
"cummax is not supported for category dtype)",
|
| 401 |
+
),
|
| 402 |
+
"cummin": (
|
| 403 |
+
(NotImplementedError, TypeError),
|
| 404 |
+
"(category type does not support cummin operations|"
|
| 405 |
+
"category dtype not supported|"
|
| 406 |
+
"cummin is not supported for category dtype)",
|
| 407 |
+
),
|
| 408 |
+
"cumprod": (
|
| 409 |
+
(NotImplementedError, TypeError),
|
| 410 |
+
"(category type does not support cumprod operations|"
|
| 411 |
+
"category dtype not supported|"
|
| 412 |
+
"cumprod is not supported for category dtype)",
|
| 413 |
+
),
|
| 414 |
+
"cumsum": (
|
| 415 |
+
(NotImplementedError, TypeError),
|
| 416 |
+
"(category type does not support cumsum operations|"
|
| 417 |
+
"category dtype not supported|"
|
| 418 |
+
"cumsum is not supported for category dtype)",
|
| 419 |
+
),
|
| 420 |
+
"diff": (
|
| 421 |
+
TypeError,
|
| 422 |
+
r"unsupported operand type\(s\) for -: 'Categorical' and 'Categorical'",
|
| 423 |
+
),
|
| 424 |
+
"ffill": (None, ""),
|
| 425 |
+
"fillna": (
|
| 426 |
+
TypeError,
|
| 427 |
+
r"Cannot setitem on a Categorical with a new category \(0\), "
|
| 428 |
+
"set the categories first",
|
| 429 |
+
)
|
| 430 |
+
if not using_copy_on_write
|
| 431 |
+
else (None, ""), # no-op with CoW
|
| 432 |
+
"first": (None, ""),
|
| 433 |
+
"idxmax": (None, ""),
|
| 434 |
+
"idxmin": (None, ""),
|
| 435 |
+
"last": (None, ""),
|
| 436 |
+
"max": (None, ""),
|
| 437 |
+
"mean": (
|
| 438 |
+
TypeError,
|
| 439 |
+
"|".join(
|
| 440 |
+
[
|
| 441 |
+
"'Categorical' .* does not support reduction 'mean'",
|
| 442 |
+
"category dtype does not support aggregation 'mean'",
|
| 443 |
+
]
|
| 444 |
+
),
|
| 445 |
+
),
|
| 446 |
+
"median": (
|
| 447 |
+
TypeError,
|
| 448 |
+
"|".join(
|
| 449 |
+
[
|
| 450 |
+
"'Categorical' .* does not support reduction 'median'",
|
| 451 |
+
"category dtype does not support aggregation 'median'",
|
| 452 |
+
]
|
| 453 |
+
),
|
| 454 |
+
),
|
| 455 |
+
"min": (None, ""),
|
| 456 |
+
"ngroup": (None, ""),
|
| 457 |
+
"nunique": (None, ""),
|
| 458 |
+
"pct_change": (
|
| 459 |
+
TypeError,
|
| 460 |
+
r"unsupported operand type\(s\) for /: 'Categorical' and 'Categorical'",
|
| 461 |
+
),
|
| 462 |
+
"prod": (TypeError, "category type does not support prod operations"),
|
| 463 |
+
"quantile": (TypeError, "No matching signature found"),
|
| 464 |
+
"rank": (None, ""),
|
| 465 |
+
"sem": (
|
| 466 |
+
TypeError,
|
| 467 |
+
"|".join(
|
| 468 |
+
[
|
| 469 |
+
"'Categorical' .* does not support reduction 'sem'",
|
| 470 |
+
"category dtype does not support aggregation 'sem'",
|
| 471 |
+
]
|
| 472 |
+
),
|
| 473 |
+
),
|
| 474 |
+
"shift": (None, ""),
|
| 475 |
+
"size": (None, ""),
|
| 476 |
+
"skew": (
|
| 477 |
+
TypeError,
|
| 478 |
+
"|".join(
|
| 479 |
+
[
|
| 480 |
+
"dtype category does not support reduction 'skew'",
|
| 481 |
+
"category type does not support skew operations",
|
| 482 |
+
]
|
| 483 |
+
),
|
| 484 |
+
),
|
| 485 |
+
"std": (
|
| 486 |
+
TypeError,
|
| 487 |
+
"|".join(
|
| 488 |
+
[
|
| 489 |
+
"'Categorical' .* does not support reduction 'std'",
|
| 490 |
+
"category dtype does not support aggregation 'std'",
|
| 491 |
+
]
|
| 492 |
+
),
|
| 493 |
+
),
|
| 494 |
+
"sum": (TypeError, "category type does not support sum operations"),
|
| 495 |
+
"var": (
|
| 496 |
+
TypeError,
|
| 497 |
+
"|".join(
|
| 498 |
+
[
|
| 499 |
+
"'Categorical' .* does not support reduction 'var'",
|
| 500 |
+
"category dtype does not support aggregation 'var'",
|
| 501 |
+
]
|
| 502 |
+
),
|
| 503 |
+
),
|
| 504 |
+
}[groupby_func]
|
| 505 |
+
|
| 506 |
+
if groupby_func == "fillna":
|
| 507 |
+
kind = "Series" if groupby_series else "DataFrame"
|
| 508 |
+
warn_msg = f"{kind}GroupBy.fillna is deprecated"
|
| 509 |
+
else:
|
| 510 |
+
warn_msg = ""
|
| 511 |
+
_call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
@pytest.mark.parametrize("how", ["agg", "transform"])
|
| 515 |
+
def test_groupby_raises_category_udf(how, by, groupby_series, df_with_cat_col):
|
| 516 |
+
# GH#50749
|
| 517 |
+
df = df_with_cat_col
|
| 518 |
+
gb = df.groupby(by=by)
|
| 519 |
+
|
| 520 |
+
if groupby_series:
|
| 521 |
+
gb = gb["d"]
|
| 522 |
+
|
| 523 |
+
def func(x):
|
| 524 |
+
raise TypeError("Test error message")
|
| 525 |
+
|
| 526 |
+
with pytest.raises(TypeError, match="Test error message"):
|
| 527 |
+
getattr(gb, how)(func)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
@pytest.mark.parametrize("how", ["agg", "transform"])
|
| 531 |
+
@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
|
| 532 |
+
def test_groupby_raises_category_np(
|
| 533 |
+
how, by, groupby_series, groupby_func_np, df_with_cat_col
|
| 534 |
+
):
|
| 535 |
+
# GH#50749
|
| 536 |
+
df = df_with_cat_col
|
| 537 |
+
gb = df.groupby(by=by)
|
| 538 |
+
|
| 539 |
+
if groupby_series:
|
| 540 |
+
gb = gb["d"]
|
| 541 |
+
|
| 542 |
+
klass, msg = {
|
| 543 |
+
np.sum: (TypeError, "category type does not support sum operations"),
|
| 544 |
+
np.mean: (
|
| 545 |
+
TypeError,
|
| 546 |
+
"category dtype does not support aggregation 'mean'",
|
| 547 |
+
),
|
| 548 |
+
}[groupby_func_np]
|
| 549 |
+
|
| 550 |
+
if groupby_series:
|
| 551 |
+
warn_msg = "using SeriesGroupBy.[sum|mean]"
|
| 552 |
+
else:
|
| 553 |
+
warn_msg = "using DataFrameGroupBy.[sum|mean]"
|
| 554 |
+
_call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
@pytest.mark.parametrize("how", ["method", "agg", "transform"])
|
| 558 |
+
def test_groupby_raises_category_on_category(
|
| 559 |
+
how,
|
| 560 |
+
by,
|
| 561 |
+
groupby_series,
|
| 562 |
+
groupby_func,
|
| 563 |
+
observed,
|
| 564 |
+
using_copy_on_write,
|
| 565 |
+
df_with_cat_col,
|
| 566 |
+
):
|
| 567 |
+
# GH#50749
|
| 568 |
+
df = df_with_cat_col
|
| 569 |
+
df["a"] = Categorical(
|
| 570 |
+
["a", "a", "a", "a", "b", "b", "b", "b", "c"],
|
| 571 |
+
categories=["a", "b", "c", "d"],
|
| 572 |
+
ordered=True,
|
| 573 |
+
)
|
| 574 |
+
args = get_groupby_method_args(groupby_func, df)
|
| 575 |
+
gb = df.groupby(by=by, observed=observed)
|
| 576 |
+
|
| 577 |
+
if groupby_series:
|
| 578 |
+
gb = gb["d"]
|
| 579 |
+
|
| 580 |
+
if groupby_func == "corrwith":
|
| 581 |
+
assert not hasattr(gb, "corrwith")
|
| 582 |
+
return
|
| 583 |
+
|
| 584 |
+
empty_groups = not observed and any(group.empty for group in gb.groups.values())
|
| 585 |
+
if (
|
| 586 |
+
not observed
|
| 587 |
+
and how != "transform"
|
| 588 |
+
and isinstance(by, list)
|
| 589 |
+
and isinstance(by[0], str)
|
| 590 |
+
and by == ["a", "b"]
|
| 591 |
+
):
|
| 592 |
+
assert not empty_groups
|
| 593 |
+
# TODO: empty_groups should be true due to unobserved categorical combinations
|
| 594 |
+
empty_groups = True
|
| 595 |
+
if how == "transform":
|
| 596 |
+
# empty groups will be ignored
|
| 597 |
+
empty_groups = False
|
| 598 |
+
|
| 599 |
+
klass, msg = {
|
| 600 |
+
"all": (None, ""),
|
| 601 |
+
"any": (None, ""),
|
| 602 |
+
"bfill": (None, ""),
|
| 603 |
+
"corrwith": (
|
| 604 |
+
TypeError,
|
| 605 |
+
r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
|
| 606 |
+
),
|
| 607 |
+
"count": (None, ""),
|
| 608 |
+
"cumcount": (None, ""),
|
| 609 |
+
"cummax": (
|
| 610 |
+
(NotImplementedError, TypeError),
|
| 611 |
+
"(cummax is not supported for category dtype|"
|
| 612 |
+
"category dtype not supported|"
|
| 613 |
+
"category type does not support cummax operations)",
|
| 614 |
+
),
|
| 615 |
+
"cummin": (
|
| 616 |
+
(NotImplementedError, TypeError),
|
| 617 |
+
"(cummin is not supported for category dtype|"
|
| 618 |
+
"category dtype not supported|"
|
| 619 |
+
"category type does not support cummin operations)",
|
| 620 |
+
),
|
| 621 |
+
"cumprod": (
|
| 622 |
+
(NotImplementedError, TypeError),
|
| 623 |
+
"(cumprod is not supported for category dtype|"
|
| 624 |
+
"category dtype not supported|"
|
| 625 |
+
"category type does not support cumprod operations)",
|
| 626 |
+
),
|
| 627 |
+
"cumsum": (
|
| 628 |
+
(NotImplementedError, TypeError),
|
| 629 |
+
"(cumsum is not supported for category dtype|"
|
| 630 |
+
"category dtype not supported|"
|
| 631 |
+
"category type does not support cumsum operations)",
|
| 632 |
+
),
|
| 633 |
+
"diff": (TypeError, "unsupported operand type"),
|
| 634 |
+
"ffill": (None, ""),
|
| 635 |
+
"fillna": (
|
| 636 |
+
TypeError,
|
| 637 |
+
r"Cannot setitem on a Categorical with a new category \(0\), "
|
| 638 |
+
"set the categories first",
|
| 639 |
+
)
|
| 640 |
+
if not using_copy_on_write
|
| 641 |
+
else (None, ""), # no-op with CoW
|
| 642 |
+
"first": (None, ""),
|
| 643 |
+
"idxmax": (ValueError, "empty group due to unobserved categories")
|
| 644 |
+
if empty_groups
|
| 645 |
+
else (None, ""),
|
| 646 |
+
"idxmin": (ValueError, "empty group due to unobserved categories")
|
| 647 |
+
if empty_groups
|
| 648 |
+
else (None, ""),
|
| 649 |
+
"last": (None, ""),
|
| 650 |
+
"max": (None, ""),
|
| 651 |
+
"mean": (TypeError, "category dtype does not support aggregation 'mean'"),
|
| 652 |
+
"median": (TypeError, "category dtype does not support aggregation 'median'"),
|
| 653 |
+
"min": (None, ""),
|
| 654 |
+
"ngroup": (None, ""),
|
| 655 |
+
"nunique": (None, ""),
|
| 656 |
+
"pct_change": (TypeError, "unsupported operand type"),
|
| 657 |
+
"prod": (TypeError, "category type does not support prod operations"),
|
| 658 |
+
"quantile": (TypeError, ""),
|
| 659 |
+
"rank": (None, ""),
|
| 660 |
+
"sem": (
|
| 661 |
+
TypeError,
|
| 662 |
+
"|".join(
|
| 663 |
+
[
|
| 664 |
+
"'Categorical' .* does not support reduction 'sem'",
|
| 665 |
+
"category dtype does not support aggregation 'sem'",
|
| 666 |
+
]
|
| 667 |
+
),
|
| 668 |
+
),
|
| 669 |
+
"shift": (None, ""),
|
| 670 |
+
"size": (None, ""),
|
| 671 |
+
"skew": (
|
| 672 |
+
TypeError,
|
| 673 |
+
"|".join(
|
| 674 |
+
[
|
| 675 |
+
"category type does not support skew operations",
|
| 676 |
+
"dtype category does not support reduction 'skew'",
|
| 677 |
+
]
|
| 678 |
+
),
|
| 679 |
+
),
|
| 680 |
+
"std": (
|
| 681 |
+
TypeError,
|
| 682 |
+
"|".join(
|
| 683 |
+
[
|
| 684 |
+
"'Categorical' .* does not support reduction 'std'",
|
| 685 |
+
"category dtype does not support aggregation 'std'",
|
| 686 |
+
]
|
| 687 |
+
),
|
| 688 |
+
),
|
| 689 |
+
"sum": (TypeError, "category type does not support sum operations"),
|
| 690 |
+
"var": (
|
| 691 |
+
TypeError,
|
| 692 |
+
"|".join(
|
| 693 |
+
[
|
| 694 |
+
"'Categorical' .* does not support reduction 'var'",
|
| 695 |
+
"category dtype does not support aggregation 'var'",
|
| 696 |
+
]
|
| 697 |
+
),
|
| 698 |
+
),
|
| 699 |
+
}[groupby_func]
|
| 700 |
+
|
| 701 |
+
if groupby_func == "fillna":
|
| 702 |
+
kind = "Series" if groupby_series else "DataFrame"
|
| 703 |
+
warn_msg = f"{kind}GroupBy.fillna is deprecated"
|
| 704 |
+
else:
|
| 705 |
+
warn_msg = ""
|
| 706 |
+
_call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
def test_subsetting_columns_axis_1_raises():
    # GH 35443: column subsetting is invalid on an axis=1 groupby.
    frame = DataFrame({"a": [1], "b": [2], "c": [3]})
    depr_msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=depr_msg):
        grouped = frame.groupby("a", axis=1)
    err_msg = "Cannot subset columns when using axis=1"
    with pytest.raises(ValueError, match=err_msg):
        grouped["b"]
|
parrot/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc
ADDED
|
Binary file (6.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/tensor_utils.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 AlQuraishi Laboratory
|
| 2 |
+
# Copyright 2021 DeepMind Technologies Limited
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from functools import partial
|
| 17 |
+
from typing import Any, Callable, Dict, List, Type, TypeVar, Union, overload
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
import torch.nn as nn
|
| 21 |
+
import torch.types
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor:
    """Add ``m2`` into ``m1``, optionally in place.

    The first operation inside a gradient checkpoint can't be in-place,
    but in-place addition saves memory during inference — hence the flag.
    """
    if inplace:
        m1 += m2
        return m1
    return m1 + m2
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> torch.Tensor:
    """Permute the trailing ``len(inds)`` dimensions of ``tensor`` per ``inds``.

    ``inds`` indexes into the last ``len(inds)`` dimensions; leading
    (batch) dimensions are left untouched.
    """
    n_moved = len(inds)
    leading = list(range(tensor.dim() - n_moved))
    trailing = [i - n_moved for i in inds]
    return tensor.permute(leading + trailing)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def flatten_final_dims(t: torch.Tensor, no_dims: int) -> torch.Tensor:
    """Collapse the last ``no_dims`` dimensions of ``t`` into a single one."""
    lead_shape = t.shape[:-no_dims]
    return t.reshape(lead_shape + (-1,))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def masked_mean(mask: torch.Tensor, value: torch.Tensor, dim: int, eps: float = 1e-4) -> torch.Tensor:
    """Mean of ``value`` over ``dim`` weighted by ``mask``.

    ``eps`` guards against division by zero when a slice is fully masked.
    """
    mask = mask.expand(*value.shape)
    numer = torch.sum(mask * value, dim=dim)
    denom = eps + torch.sum(mask, dim=dim)
    return numer / denom
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def pts_to_distogram(
    pts: torch.Tensor, min_bin: torch.types.Number = 2.3125, max_bin: torch.types.Number = 21.6875, no_bins: int = 64
) -> torch.Tensor:
    """Bucketize pairwise Euclidean distances between points into distance bins.

    Returns, for each pair of points, the index of the bin its distance
    falls into, with ``no_bins - 1`` boundaries spanning [min_bin, max_bin].
    """
    boundaries = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device)
    deltas = pts.unsqueeze(-2) - pts.unsqueeze(-3)
    dists = torch.sqrt((deltas**2).sum(dim=-1))
    return torch.bucketize(dists, boundaries)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def dict_multimap(fn: Callable[[list], Any], dicts: List[dict]) -> dict:
    """Zip several same-shaped dicts key-by-key and apply ``fn`` to each value list.

    Nested dicts are recursed into; the first dict determines the key set.
    """
    template = dicts[0]
    out = {}
    for key, sample in template.items():
        stacked = [d[key] for d in dicts]
        out[key] = dict_multimap(fn, stacked) if isinstance(sample, dict) else fn(stacked)
    return out
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor:
    """One-hot encode each element of ``x`` by its nearest value in ``v_bins``."""
    n_bins = len(v_bins)
    bins = v_bins.view((1,) * x.dim() + (n_bins,))
    nearest = torch.argmin((x[..., None] - bins).abs(), dim=-1)
    return nn.functional.one_hot(nearest, num_classes=n_bins).float()
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def batched_gather(data: torch.Tensor, inds: torch.Tensor, dim: int = 0, no_batch_dims: int = 0) -> torch.Tensor:
    """Gather along dimension `dim` of `data` using `inds`, broadcasting over
    the leading `no_batch_dims` batch dimensions.

    NOTE(review): appears to assume `inds` shares the leading batch shape of
    `data[:no_batch_dims]` — confirm against callers.
    """
    ranges: List[Union[slice, torch.Tensor]] = []
    # Build one index tensor per batch dimension, each shaped so it
    # broadcasts against `inds` (singleton everywhere except its own axis).
    for i, s in enumerate(data.shape[:no_batch_dims]):
        r = torch.arange(s)
        r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1))))
        ranges.append(r)

    # Take every non-batch dimension fully, except the gather dimension,
    # which is indexed by `inds`. `dim` is rebased when non-negative.
    remaining_dims: List[Union[slice, torch.Tensor]] = [slice(None) for _ in range(len(data.shape) - no_batch_dims)]
    remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds
    ranges.extend(remaining_dims)
    # Matt note: Editing this to get around the behaviour of using a list as an array index changing
    # in recent Numpy versions
    return data[tuple(ranges)]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
T = TypeVar("T")
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# With tree_map, a poor man's JAX tree_map
|
| 97 |
+
def dict_map(
    fn: Callable[[T], Any], dic: Dict[Any, Union[dict, list, tuple, T]], leaf_type: Type[T]
) -> Dict[Any, Union[dict, list, tuple, Any]]:
    """Apply ``fn`` to every ``leaf_type`` leaf of a possibly-nested dict.

    Nested dicts are handled directly; any other value is delegated to
    ``tree_map``, which recurses through lists/tuples down to the leaves.
    """
    result: Dict[Any, Union[dict, list, tuple, Any]] = {}
    for key, val in dic.items():
        result[key] = dict_map(fn, val, leaf_type) if isinstance(val, dict) else tree_map(fn, val, leaf_type)
    return result
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# Typed overloads for tree_map: the returned container mirrors the input
# container (dict -> dict, list -> list, tuple -> tuple, leaf -> fn result).
@overload
def tree_map(fn: Callable[[T], Any], tree: T, leaf_type: Type[T]) -> Any:
    ...


@overload
def tree_map(fn: Callable[[T], Any], tree: dict, leaf_type: Type[T]) -> dict:
    ...


@overload
def tree_map(fn: Callable[[T], Any], tree: list, leaf_type: Type[T]) -> list:
    ...


@overload
def tree_map(fn: Callable[[T], Any], tree: tuple, leaf_type: Type[T]) -> tuple:
    ...
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def tree_map(fn, tree, leaf_type):
    """Recursively apply ``fn`` to every ``leaf_type`` leaf of a nested
    dict/list/tuple structure, preserving container shapes.

    Args:
        fn: Callable applied to each leaf.
        tree: Nested structure of dicts, lists, tuples, and leaves.
        leaf_type: Type treated as a leaf (e.g. ``torch.Tensor``).

    Returns:
        A structure of the same shape with every leaf replaced by ``fn(leaf)``.

    Raises:
        ValueError: If a node is neither a supported container nor a leaf.
    """
    if isinstance(tree, dict):
        return dict_map(fn, tree, leaf_type)
    elif isinstance(tree, list):
        return [tree_map(fn, x, leaf_type) for x in tree]
    elif isinstance(tree, tuple):
        return tuple(tree_map(fn, x, leaf_type) for x in tree)
    elif isinstance(tree, leaf_type):
        return fn(tree)
    else:
        # Report the offending type in the exception rather than via a stray
        # debug print to stdout (the original printed, then raised blind).
        raise ValueError(f"Not supported: {type(tree)}")
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
# Specialization of tree_map that treats torch.Tensor as the leaf type.
tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
|
parrot/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for ESM."""
|
| 16 |
+
import os
|
| 17 |
+
from typing import List, Optional
|
| 18 |
+
|
| 19 |
+
from ...tokenization_utils import PreTrainedTokenizer
|
| 20 |
+
from ...utils import logging
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.get_logger(__name__)
|
| 24 |
+
|
| 25 |
+
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def load_vocab_file(vocab_file):
    """Read a vocabulary file with one token per line.

    Args:
        vocab_file: Path to a plain-text vocabulary file.

    Returns:
        List of token strings, one per line, with surrounding whitespace
        stripped.
    """
    # Pin the encoding so the parse does not depend on the platform's
    # default locale encoding (vocab files are UTF-8 text).
    with open(vocab_file, "r", encoding="utf-8") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.

    Tokenization is whitespace splitting over a fixed amino-acid/special-token
    vocabulary loaded from `vocab_file`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        # Build both lookup directions before the base-class __init__ runs,
        # since it may call the id/token converters below.
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        super().__init__(
            unk_token=unk_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            eos_token=eos_token,
            **kwargs,
        )

        # TODO, all the tokens are added? But they are also part of the vocab... bit strange.
        # none of them are special, but they all need special splitting.

        # Register every vocab token as no-split and rebuild the tokenization
        # trie so multi-character tokens are matched as units.
        self.unique_no_split_tokens = self.all_tokens
        self._update_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        # Unknown ids fall back to the unk token instead of raising.
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        # Unknown tokens fall back to the unk token's id.
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM input is a space-separated residue string; no subword merging.
        return text.split()

    def get_vocab(self):
        # Base vocabulary plus any tokens added after construction.
        base_vocab = self._token_to_id.copy()
        base_vocab.update(self.added_tokens_encoder)
        return base_vocab

    def token_to_id(self, token: str) -> int:
        # Public mirror of _convert_token_to_id.
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        # Public mirror of _convert_id_to_token.
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Single sequence: <cls> seq [<eos>]; pair: <cls> seq <eos> seq <eos>.
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                List of ids of the second sequence.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        # Mirror build_inputs_with_special_tokens: cls + seq + eos (+ seq + eos).
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        # Write the base vocabulary (one token per line) alongside the model.
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        # Size of the base vocabulary only (added tokens excluded).
        return len(self.all_tokens)
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/__init__.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Import structure consumed by _LazyModule: submodule name -> public names.
_import_structure = {"configuration_hubert": ["HubertConfig"]}

# PyTorch model classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_hubert"] = [
        "HubertForCTC",
        "HubertForSequenceClassification",
        "HubertModel",
        "HubertPreTrainedModel",
    ]


# TensorFlow model classes are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_hubert"] = [
        "TFHubertForCTC",
        "TFHubertModel",
        "TFHubertPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module object is
# replaced with a lazy loader so heavy backends are imported only on access.
if TYPE_CHECKING:
    from .configuration_hubert import HubertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_hubert import (
            HubertForCTC,
            HubertForSequenceClassification,
            HubertModel,
            HubertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_hubert import (
            TFHubertForCTC,
            TFHubertModel,
            TFHubertPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc
ADDED
|
Binary file (2.06 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" Hubert model configuration"""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
import operator
|
| 19 |
+
|
| 20 |
+
from ...configuration_utils import PretrainedConfig
|
| 21 |
+
from ...utils import logging
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.get_logger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class HubertConfig(PretrainedConfig):
|
| 28 |
+
r"""
|
| 29 |
+
This is the configuration class to store the configuration of a [`HubertModel`]. It is used to instantiate an
|
| 30 |
+
Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
| 31 |
+
with the defaults will yield a similar configuration to that of the Hubert
|
| 32 |
+
[facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) architecture.
|
| 33 |
+
|
| 34 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 35 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
vocab_size (`int`, *optional*, defaults to 32):
|
| 40 |
+
Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the
|
| 41 |
+
`inputs_ids` passed when calling [`HubertModel`]. Vocabulary size of the model. Defines the different
|
| 42 |
+
tokens that can be represented by the *inputs_ids* passed to the forward method of [`HubertModel`].
|
| 43 |
+
hidden_size (`int`, *optional*, defaults to 768):
|
| 44 |
+
Dimensionality of the encoder layers and the pooler layer.
|
| 45 |
+
num_hidden_layers (`int`, *optional*, defaults to 12):
|
| 46 |
+
Number of hidden layers in the Transformer encoder.
|
| 47 |
+
num_attention_heads (`int`, *optional*, defaults to 12):
|
| 48 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 49 |
+
intermediate_size (`int`, *optional*, defaults to 3072):
|
| 50 |
+
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
|
| 51 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
|
| 52 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
| 53 |
+
`"relu"`, `"selu"` and `"gelu_new"` are supported.
|
| 54 |
+
hidden_dropout(`float`, *optional*, defaults to 0.1):
|
| 55 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| 56 |
+
activation_dropout (`float`, *optional*, defaults to 0.1):
|
| 57 |
+
The dropout ratio for activations inside the fully connected layer.
|
| 58 |
+
attention_dropout(`float`, *optional*, defaults to 0.1):
|
| 59 |
+
The dropout ratio for the attention probabilities.
|
| 60 |
+
final_dropout (`float`, *optional*, defaults to 0.1):
|
| 61 |
+
The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
|
| 62 |
+
layerdrop (`float`, *optional*, defaults to 0.1):
|
| 63 |
+
The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more
|
| 64 |
+
details.
|
| 65 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
| 66 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 67 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
|
| 68 |
+
The epsilon used by the layer normalization layers.
|
| 69 |
+
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
|
| 70 |
+
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
|
| 71 |
+
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
|
| 72 |
+
convolutional layers.
|
| 73 |
+
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
|
| 74 |
+
The dropout probability for output of the feature encoder.
|
| 75 |
+
feat_proj_layer_norm (`bool`, *optional*, defaults to `True`):
|
| 76 |
+
Whether to apply LayerNorm to the output of the feature encoder.
|
| 77 |
+
feat_extract_activation (`str, `optional`, defaults to `"gelu"`):
|
| 78 |
+
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
|
| 79 |
+
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
|
| 80 |
+
conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
|
| 81 |
+
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
|
| 82 |
+
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
|
| 83 |
+
conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
|
| 84 |
+
A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
|
| 85 |
+
of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
|
| 86 |
+
conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
|
| 87 |
+
A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
|
| 88 |
+
length of *conv_kernel* defines the number of convolutional layers and has to match the length of
|
| 89 |
+
*conv_dim*.
|
| 90 |
+
conv_bias (`bool`, *optional*, defaults to `False`):
|
| 91 |
+
Whether the 1D convolutional layers have a bias.
|
| 92 |
+
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
|
| 93 |
+
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
|
| 94 |
+
embeddings layer.
|
| 95 |
+
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
|
| 96 |
+
Number of groups of 1D convolutional positional embeddings layer.
|
| 97 |
+
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
|
| 98 |
+
Whether do apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
|
| 99 |
+
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
|
| 100 |
+
False` corresponds to applying layer norm after the attention layer.
|
| 101 |
+
apply_spec_augment (`bool`, *optional*, defaults to `True`):
|
| 102 |
+
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
|
| 103 |
+
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
|
| 104 |
+
Recognition](https://arxiv.org/abs/1904.08779).
|
| 105 |
+
mask_time_prob (`float`, *optional*, defaults to 0.05):
|
| 106 |
+
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
|
| 107 |
+
procecure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
|
| 108 |
+
reasoning from the propability of each feature vector to be chosen as the start of the vector span to be
|
| 109 |
+
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
|
| 110 |
+
actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
|
| 111 |
+
mask_time_length (`int`, *optional*, defaults to 10):
|
| 112 |
+
Length of vector span along the time axis.
|
| 113 |
+
mask_time_min_masks (`int`, *optional*, defaults to 2),:
|
| 114 |
+
The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
|
| 115 |
+
irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
|
| 116 |
+
mask_time_min_masks''
|
| 117 |
+
mask_feature_prob (`float`, *optional*, defaults to 0.0):
|
| 118 |
+
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
|
| 119 |
+
masking procecure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over
|
| 120 |
+
the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector
|
| 121 |
+
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
|
| 122 |
+
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
|
| 123 |
+
True`.
|
| 124 |
+
mask_feature_length (`int`, *optional*, defaults to 10):
|
| 125 |
+
Length of vector span along the feature axis.
|
| 126 |
+
mask_feature_min_masks (`int`, *optional*, defaults to 0),:
|
| 127 |
+
The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
|
| 128 |
+
step, irrespectively of `mask_feature_prob`. Only relevant if
|
| 129 |
+
''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
|
| 130 |
+
ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
|
| 131 |
+
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
|
| 132 |
+
instance of [`HubertForCTC`].
|
| 133 |
+
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
|
| 134 |
+
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
|
| 135 |
+
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
|
| 136 |
+
of [`HubertForCTC`].
|
| 137 |
+
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
|
| 138 |
+
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
|
| 139 |
+
instance of [`HubertForSequenceClassification`].
|
| 140 |
+
classifier_proj_size (`int`, *optional*, defaults to 256):
|
| 141 |
+
Dimensionality of the projection before token mean-pooling for classification.
|
| 142 |
+
|
| 143 |
+
Example:
|
| 144 |
+
|
| 145 |
+
```python
|
| 146 |
+
>>> from transformers import HubertModel, HubertConfig
|
| 147 |
+
|
| 148 |
+
>>> # Initializing a Hubert facebook/hubert-base-ls960 style configuration
|
| 149 |
+
>>> configuration = HubertConfig()
|
| 150 |
+
|
| 151 |
+
>>> # Initializing a model from the facebook/hubert-base-ls960 style configuration
|
| 152 |
+
>>> model = HubertModel(configuration)
|
| 153 |
+
|
| 154 |
+
>>> # Accessing the model configuration
|
| 155 |
+
>>> configuration = model.config
|
| 156 |
+
```"""
|
| 157 |
+
|
| 158 |
+
model_type = "hubert"
|
| 159 |
+
|
| 160 |
+
def __init__(
|
| 161 |
+
self,
|
| 162 |
+
vocab_size=32,
|
| 163 |
+
hidden_size=768,
|
| 164 |
+
num_hidden_layers=12,
|
| 165 |
+
num_attention_heads=12,
|
| 166 |
+
intermediate_size=3072,
|
| 167 |
+
hidden_act="gelu",
|
| 168 |
+
hidden_dropout=0.1,
|
| 169 |
+
activation_dropout=0.1,
|
| 170 |
+
attention_dropout=0.1,
|
| 171 |
+
feat_proj_layer_norm=True,
|
| 172 |
+
feat_proj_dropout=0.0,
|
| 173 |
+
final_dropout=0.1,
|
| 174 |
+
layerdrop=0.1,
|
| 175 |
+
initializer_range=0.02,
|
| 176 |
+
layer_norm_eps=1e-5,
|
| 177 |
+
feat_extract_norm="group",
|
| 178 |
+
feat_extract_activation="gelu",
|
| 179 |
+
conv_dim=(512, 512, 512, 512, 512, 512, 512),
|
| 180 |
+
conv_stride=(5, 2, 2, 2, 2, 2, 2),
|
| 181 |
+
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
|
| 182 |
+
conv_bias=False,
|
| 183 |
+
num_conv_pos_embeddings=128,
|
| 184 |
+
num_conv_pos_embedding_groups=16,
|
| 185 |
+
do_stable_layer_norm=False,
|
| 186 |
+
apply_spec_augment=True,
|
| 187 |
+
mask_time_prob=0.05,
|
| 188 |
+
mask_time_length=10,
|
| 189 |
+
mask_time_min_masks=2,
|
| 190 |
+
mask_feature_prob=0.0,
|
| 191 |
+
mask_feature_length=10,
|
| 192 |
+
mask_feature_min_masks=0,
|
| 193 |
+
ctc_loss_reduction="sum",
|
| 194 |
+
ctc_zero_infinity=False,
|
| 195 |
+
use_weighted_layer_sum=False,
|
| 196 |
+
classifier_proj_size=256,
|
| 197 |
+
pad_token_id=0,
|
| 198 |
+
bos_token_id=1,
|
| 199 |
+
eos_token_id=2,
|
| 200 |
+
**kwargs,
|
| 201 |
+
):
|
| 202 |
+
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
|
| 203 |
+
self.hidden_size = hidden_size
|
| 204 |
+
self.feat_extract_norm = feat_extract_norm
|
| 205 |
+
self.feat_extract_activation = feat_extract_activation
|
| 206 |
+
self.conv_dim = list(conv_dim)
|
| 207 |
+
self.conv_stride = list(conv_stride)
|
| 208 |
+
self.conv_kernel = list(conv_kernel)
|
| 209 |
+
self.conv_bias = conv_bias
|
| 210 |
+
self.num_conv_pos_embeddings = num_conv_pos_embeddings
|
| 211 |
+
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
|
| 212 |
+
self.num_feat_extract_layers = len(self.conv_dim)
|
| 213 |
+
self.num_hidden_layers = num_hidden_layers
|
| 214 |
+
self.intermediate_size = intermediate_size
|
| 215 |
+
self.hidden_act = hidden_act
|
| 216 |
+
self.num_attention_heads = num_attention_heads
|
| 217 |
+
self.hidden_dropout = hidden_dropout
|
| 218 |
+
self.attention_dropout = attention_dropout
|
| 219 |
+
self.activation_dropout = activation_dropout
|
| 220 |
+
self.feat_proj_layer_norm = feat_proj_layer_norm
|
| 221 |
+
self.feat_proj_dropout = feat_proj_dropout
|
| 222 |
+
self.final_dropout = final_dropout
|
| 223 |
+
self.layerdrop = layerdrop
|
| 224 |
+
self.layer_norm_eps = layer_norm_eps
|
| 225 |
+
self.initializer_range = initializer_range
|
| 226 |
+
self.vocab_size = vocab_size
|
| 227 |
+
self.do_stable_layer_norm = do_stable_layer_norm
|
| 228 |
+
self.use_weighted_layer_sum = use_weighted_layer_sum
|
| 229 |
+
self.classifier_proj_size = classifier_proj_size
|
| 230 |
+
|
| 231 |
+
if (
|
| 232 |
+
(len(self.conv_stride) != self.num_feat_extract_layers)
|
| 233 |
+
or (len(self.conv_kernel) != self.num_feat_extract_layers)
|
| 234 |
+
or (len(self.conv_dim) != self.num_feat_extract_layers)
|
| 235 |
+
):
|
| 236 |
+
raise ValueError(
|
| 237 |
+
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
|
| 238 |
+
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
|
| 239 |
+
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
|
| 240 |
+
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
|
| 244 |
+
self.apply_spec_augment = apply_spec_augment
|
| 245 |
+
self.mask_time_prob = mask_time_prob
|
| 246 |
+
self.mask_time_length = mask_time_length
|
| 247 |
+
self.mask_time_min_masks = mask_time_min_masks
|
| 248 |
+
self.mask_feature_prob = mask_feature_prob
|
| 249 |
+
self.mask_feature_length = mask_feature_length
|
| 250 |
+
self.mask_feature_min_masks = mask_feature_min_masks
|
| 251 |
+
|
| 252 |
+
# ctc loss
|
| 253 |
+
self.ctc_loss_reduction = ctc_loss_reduction
|
| 254 |
+
self.ctc_zero_infinity = ctc_zero_infinity
|
| 255 |
+
|
| 256 |
+
@property
|
| 257 |
+
def inputs_to_logits_ratio(self):
|
| 258 |
+
return functools.reduce(operator.mul, self.conv_stride, 1)
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Convert Hubert checkpoint."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
from s3prl.hub import distilhubert
|
| 22 |
+
|
| 23 |
+
from transformers import HubertConfig, HubertModel, Wav2Vec2FeatureExtractor, logging
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logging.set_verbosity_info()
|
| 27 |
+
logger = logging.get_logger(__name__)
|
| 28 |
+
|
| 29 |
+
MAPPING = {
|
| 30 |
+
"post_extract_proj": "feature_projection.projection",
|
| 31 |
+
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
|
| 32 |
+
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
|
| 33 |
+
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
|
| 34 |
+
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
|
| 35 |
+
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
|
| 36 |
+
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
|
| 37 |
+
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
|
| 38 |
+
"fc2": "encoder.layers.*.feed_forward.output_dense",
|
| 39 |
+
"final_layer_norm": "encoder.layers.*.final_layer_norm",
|
| 40 |
+
"encoder.layer_norm": "encoder.layer_norm",
|
| 41 |
+
"mask_emb": "masked_spec_embed",
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the tensor reached by the dotted attribute path `key`
    on `hf_pointer`, optionally into its `weight_type` slot. Asserts the
    shapes match before copying; `full_name` is only used for messages."""
    # Descend the dotted path to the leaf module / parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_shape = hf_pointer.shape if weight_type is None else getattr(hf_pointer, weight_type).shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    # Copy into the matching tensor slot on the leaf.
    if weight_type in ("weight", "weight_g", "weight_v", "bias"):
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of `fairseq_model`'s state dict into `hf_model`.

    Conv-feature-extractor tensors are routed through `load_conv_layer`; all
    other tensors are matched against the substring keys in `MAPPING` and
    assigned via `set_recursively`. Tensors matching nothing are collected and
    logged as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # The encoder-layer index sits just before the matched
                        # key in the fairseq name, e.g. "...layers.3.self_attn...".
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    # Determine which tensor slot the fairseq name refers to.
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    The suffix of `full_name` after "conv_layers." encodes
    `<layer_id>.<type_id>.<param>`: type_id 0 is the convolution itself and
    type_id 2 is its layer/group norm (only layer 0 carries a norm in
    group-norm mode). Tensors matching no slot are appended to
    `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Fixed: the failure message previously indexed
            # `feature_extractor[layer_id]` (not subscriptable), which raised
            # TypeError instead of reporting the shape mismatch.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # Fixed: this branch copies the bias, not the weight.
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def convert_config(model):
    """Derive a `HubertConfig` from an s3prl/fairseq DistilHuBERT model.

    Reads the hyper-parameters off `model.config` and maps them onto the
    corresponding HF config attributes; SpecAugment and all dropouts that are
    irrelevant at conversion time are disabled.
    """
    import ast  # local import: only needed for parsing the conv-layer spec

    config = HubertConfig()
    fs_config = model.config

    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = False
    config.attention_dropout = fs_config.attention_dropout
    config.conv_bias = False
    # The spec is a Python literal string like "[(512, 10, 5), (512, 3, 2), ...]";
    # literal_eval parses it safely (eval() would execute arbitrary code).
    conv_layers = ast.literal_eval(fs_config.extractor_conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.feat_proj_layer_norm = False
    config.feat_proj_dropout = 0.0
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn
    config.hidden_dropout = fs_config.dropout
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = 0.0
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers

    return config
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@torch.no_grad()
def convert_hubert_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.

    Downloads the s3prl DistilHuBERT model, builds a matching `HubertConfig`
    (or loads one from `config_path`), copies all weights into a fresh
    `HubertModel`, and saves model + feature extractor to
    `pytorch_dump_folder_path`.
    """
    # s3prl wraps the actual fairseq model twice, hence `.model.model`.
    model = distilhubert().model.model

    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        # No explicit config: derive one from the s3prl model's own settings.
        config = convert_config(model)
    model = model.eval()

    # DistilHuBERT uses raw (un-normalized) waveforms and no attention mask.
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=False,
        return_attention_mask=False,
    )
    hf_model = HubertModel(config)

    recursively_load_weights(model, hf_model)

    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    hf_model.save_pretrained(pytorch_dump_folder_path)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
if __name__ == "__main__":
    # CLI entry point: convert the s3prl DistilHuBERT checkpoint to HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_hubert_checkpoint(args.pytorch_dump_folder_path, args.config_path)
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Convert Hubert checkpoint."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
import json
|
| 20 |
+
import os
|
| 21 |
+
|
| 22 |
+
import fairseq
|
| 23 |
+
import torch
|
| 24 |
+
from fairseq.data import Dictionary
|
| 25 |
+
|
| 26 |
+
from transformers import (
|
| 27 |
+
HubertConfig,
|
| 28 |
+
HubertForCTC,
|
| 29 |
+
HubertModel,
|
| 30 |
+
Wav2Vec2CTCTokenizer,
|
| 31 |
+
Wav2Vec2FeatureExtractor,
|
| 32 |
+
Wav2Vec2Processor,
|
| 33 |
+
logging,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
logging.set_verbosity_info()
|
| 38 |
+
logger = logging.get_logger(__name__)
|
| 39 |
+
|
| 40 |
+
MAPPING = {
|
| 41 |
+
"post_extract_proj": "feature_projection.projection",
|
| 42 |
+
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
|
| 43 |
+
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
|
| 44 |
+
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
|
| 45 |
+
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
|
| 46 |
+
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
|
| 47 |
+
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
|
| 48 |
+
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
|
| 49 |
+
"fc2": "encoder.layers.*.feed_forward.output_dense",
|
| 50 |
+
"final_layer_norm": "encoder.layers.*.final_layer_norm",
|
| 51 |
+
"encoder.layer_norm": "encoder.layer_norm",
|
| 52 |
+
"w2v_model.layer_norm": "feature_projection.layer_norm",
|
| 53 |
+
"w2v_encoder.proj": "lm_head",
|
| 54 |
+
"mask_emb": "masked_spec_embed",
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the tensor reached by the dotted attribute path `key`
    on `hf_pointer`, optionally into its `weight_type` slot. Asserts the
    shapes match before copying; `full_name` is only used for messages."""
    # Descend the dotted path to the leaf module / parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_shape = hf_pointer.shape if weight_type is None else getattr(hf_pointer, weight_type).shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    # Copy into the matching tensor slot on the leaf.
    if weight_type in ("weight", "weight_g", "weight_v", "bias"):
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of `fairseq_model`'s state dict into `hf_model`.

    Conv-feature-extractor tensors go through `load_conv_layer`; everything
    else is matched against `MAPPING` and assigned via `set_recursively`.
    When `is_finetuned`, `hf_model` is a `HubertForCTC` wrapper, so mapped
    keys (except the CTC `lm_head`) are prefixed with "hubert.". Unmatched
    tensors are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # The feature extractor lives one level deeper on the CTC wrapper.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned checkpoints wrap the backbone under "hubert.";
                # the CTC head ("lm_head") stays at the top level.
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                # Pretrained (non-finetuned) checkpoints drop the "w2v_model."
                # prefix, so also match the stripped key against the name's
                # first component.
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Encoder-layer index sits just before the matched key,
                        # e.g. "...layers.3.self_attn...".
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    # Determine which tensor slot the fairseq name refers to.
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                # NOTE(review): this `continue` is the last statement of the
                # loop body and therefore a no-op; kept for byte-compatibility.
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    The suffix of `full_name` after "conv_layers." encodes
    `<layer_id>.<type_id>.<param>`: type_id 0 is the convolution itself and
    type_id 2 is its layer/group norm (only layer 0 carries a norm in
    group-norm mode). Tensors matching no slot are appended to
    `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Fixed: the failure message previously indexed
            # `feature_extractor[layer_id]` (not subscriptable), which raised
            # TypeError instead of reporting the shape mismatch.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # Fixed: this branch copies the bias, not the weight.
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads a fairseq Hubert checkpoint from `checkpoint_path` and saves an
    equivalent HF model to `pytorch_dump_folder_path`. When `is_finetuned`,
    a CTC model (`HubertForCTC`) plus tokenizer/processor are produced from
    `dict_path`; otherwise a bare `HubertModel` is produced.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            # Write the fairseq dictionary out as an HF vocab.json.
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extraction is the only mode that uses an
            # attention mask.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        # Fine-tuned checkpoints need the dictionary directory passed through
        # to fairseq as the `data` argument.
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    # load_model_ensemble_and_task returns a list of models; take the first.
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
if __name__ == "__main__":
    # CLI entry point: convert a fairseq Hubert checkpoint (fine-tuned for CTC
    # by default; pass --not_finetuned for a pretrained-only checkpoint).
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Convert Hubert checkpoint."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
|
| 22 |
+
from transformers import HubertConfig, HubertForSequenceClassification, Wav2Vec2FeatureExtractor, logging
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
logging.set_verbosity_info()
|
| 26 |
+
logger = logging.get_logger(__name__)
|
| 27 |
+
|
| 28 |
+
SUPPORTED_MODELS = ["UtteranceLevel"]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads an s3prl UtteranceLevel downstream checkpoint, grafts its projector
    and classification head onto an HF `HubertForSequenceClassification`
    built from `base_model_name`, and saves model + feature extractor to
    `model_dump_path`.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_select = checkpoint["Config"]["downstream_expert"]["modelrc"]["select"]
    if downstream_select not in SUPPORTED_MODELS:
        raise NotImplementedError(f"The supported s3prl models are {SUPPORTED_MODELS}")

    downstream_dict = checkpoint["Downstream"]

    # Build the HF classifier on top of the pretrained base model.
    hf_config = HubertConfig.from_pretrained(config_path)
    hf_model = HubertForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    # s3prl's featurizer learns per-layer weights when weighted layer sum is on.
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    # Copy the downstream projector and classification head.
    hf_model.projector.weight.data = downstream_dict["projector.weight"]
    hf_model.projector.bias.data = downstream_dict["projector.bias"]
    hf_model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    hf_model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # (flag, help text) — every argument is an optional string defaulting to None.
    arg_specs = [
        ("--base_model_name", "Name of the huggingface pretrained base model."),
        ("--config_path", "Path to the huggingface classifier config."),
        ("--checkpoint_path", "Path to the s3prl checkpoint."),
        ("--model_dump_path", "Path to the final converted model."),
    ]
    for flag, help_text in arg_specs:
        parser.add_argument(flag, default=None, type=str, help=help_text)
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py
ADDED
|
@@ -0,0 +1,1744 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch Hubert model."""
|
| 16 |
+
|
| 17 |
+
import warnings
|
| 18 |
+
from typing import Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
import torch.utils.checkpoint
|
| 24 |
+
from torch import nn
|
| 25 |
+
from torch.nn import CrossEntropyLoss
|
| 26 |
+
|
| 27 |
+
from ...activations import ACT2FN
|
| 28 |
+
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
|
| 29 |
+
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
|
| 30 |
+
from ...modeling_utils import PreTrainedModel
|
| 31 |
+
from ...utils import (
|
| 32 |
+
add_code_sample_docstrings,
|
| 33 |
+
add_start_docstrings,
|
| 34 |
+
add_start_docstrings_to_model_forward,
|
| 35 |
+
is_flash_attn_2_available,
|
| 36 |
+
is_flash_attn_greater_or_equal_2_10,
|
| 37 |
+
logging,
|
| 38 |
+
replace_return_docstrings,
|
| 39 |
+
)
|
| 40 |
+
from .configuration_hubert import HubertConfig
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
if is_flash_attn_2_available():
|
| 44 |
+
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
| 45 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
logger = logging.get_logger(__name__)
|
| 49 |
+
|
| 50 |
+
_HIDDEN_STATES_START_POSITION = 1
|
| 51 |
+
|
| 52 |
+
# General docstring
|
| 53 |
+
_CONFIG_FOR_DOC = "HubertConfig"
|
| 54 |
+
|
| 55 |
+
# Base docstring
|
| 56 |
+
_CHECKPOINT_FOR_DOC = "facebook/hubert-large-ls960-ft"
|
| 57 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
|
| 58 |
+
|
| 59 |
+
# CTC docstring
|
| 60 |
+
_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
|
| 61 |
+
_CTC_EXPECTED_LOSS = 22.68
|
| 62 |
+
|
| 63 |
+
# Audio class docstring
|
| 64 |
+
_SEQ_CLASS_CHECKPOINT = "superb/hubert-base-superb-ks"
|
| 65 |
+
_SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
|
| 66 |
+
_SEQ_CLASS_EXPECTED_LOSS = 8.53
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
| 70 |
+
def _get_unpad_data(attention_mask):
|
| 71 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
| 72 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
| 73 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
| 74 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
| 75 |
+
return (
|
| 76 |
+
indices,
|
| 77 |
+
cu_seqlens,
|
| 78 |
+
max_seqlen_in_batch,
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
|
| 83 |
+
def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
            the first element is the batch size and the second element is the length of the axis to span.
        mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
            independently generated mask spans of length `mask_length` is computed by
            `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
            actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
            each batch dimension.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    # (drawn once so every sample in the batch rounds the same way)
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    # (attention_mask is right-padded, so sum(-1) is each sample's true length)
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        # Picking first sample just pads those vectors twice.
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller then
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    # (clamping, not dropping, so span counts per sample stay unchanged)
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
|
| 203 |
+
class HubertNoLayerNormConvLayer(nn.Module):
    """Single 1-D convolution + activation block without any normalization."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Input channels come from the previous conv layer; the very first
        # layer (layer_id == 0) consumes the raw 1-channel waveform.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # conv -> nonlinearity, nothing else.
        return self.activation(self.conv(hidden_states))
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
|
| 225 |
+
class HubertLayerNormConvLayer(nn.Module):
    """1-D conv block that layer-normalizes across channels before activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Input channels come from the previous conv layer; the very first
        # layer (layer_id == 0) consumes the raw 1-channel waveform.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        # LayerNorm acts on the last dim; Conv1d keeps channels at axis -2,
        # so swap time/channel around the norm and swap back.
        normed = self.layer_norm(hidden_states.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(normed)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
|
| 253 |
+
class HubertGroupNormConvLayer(nn.Module):
    """1-D conv block with GroupNorm (one group per channel) before activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Input channels come from the previous conv layer; the very first
        # layer (layer_id == 0) consumes the raw 1-channel waveform.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

        # num_groups == num_channels makes this a per-channel normalization.
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        return self.activation(self.layer_norm(self.conv(hidden_states)))
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
|
| 278 |
+
class HubertPositionalConvEmbedding(nn.Module):
    """Convolutional positional embedding applied over the time axis of the hidden states."""

    def __init__(self, config):
        super().__init__()
        # Grouped conv over time whose receptive field encodes relative position.
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        # Prefer the parametrization-based weight_norm when this torch version
        # provides it (the legacy nn.utils.weight_norm API is older).
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        if is_deepspeed_zero3_enabled():
            import deepspeed

            # Under ZeRO-3 the conv weight is partitioned across ranks: gather
            # it on rank 0 before applying weight_norm, then register the two
            # resulting parameters so deepspeed tracks them externally.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = weight_norm(self.conv, name="weight", dim=2)

        # Trims the surplus frame produced by even kernel "same" padding.
        self.padding = HubertSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # (batch, time, channels) -> (batch, channels, time) for Conv1d.
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        # Back to (batch, time, channels).
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Hubert
|
| 318 |
+
class HubertSamePadLayer(nn.Module):
    """Trims the surplus output frame produced by an even-sized 'same'-padded conv."""

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # Even kernel sizes yield exactly one extra frame; odd sizes yield none.
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove == 0:
            return hidden_states
        # Drop the trailing frame(s) along the time axis.
        return hidden_states[:, :, : -self.num_pad_remove]
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->Hubert
|
| 330 |
+
class HubertFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()

        # "group" mode: GroupNorm on the first conv layer only;
        # "layer" mode: LayerNorm inside every conv layer.
        if config.feat_extract_norm == "group":
            conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)] + [
                HubertNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            conv_layers = [HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        # False once _freeze_parameters() has been called.
        self._requires_grad = True

    def _freeze_parameters(self):
        # Freeze the entire feature encoder (typically during fine-tuning).
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        # (batch, time) -> (batch, 1, time): conv layers expect a channel dim.
        hidden_states = input_values[:, None]

        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            # Only checkpoint when the encoder is trainable and training.
            if self._requires_grad and self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(
                    conv_layer.__call__,
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
class HubertFeatureExtractor(HubertFeatureEncoder):
    """Deprecated alias of `HubertFeatureEncoder`, kept only for backward compatibility."""

    def __init__(self, config):
        super().__init__(config)
        # Fix: the warning previously misspelled "deprecated" as "depreciated".
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
class HubertFeatureProjection(nn.Module):
    """Projects the conv-extracted features into the transformer hidden size."""

    def __init__(self, config):
        super().__init__()
        # Whether to layer-normalize the raw conv features before projecting.
        self.feat_proj_layer_norm = config.feat_proj_layer_norm
        if self.feat_proj_layer_norm:
            self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        if self.feat_proj_layer_norm:
            hidden_states = self.layer_norm(hidden_states)
        return self.dropout(self.projection(hidden_states))
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Hubert
|
| 404 |
+
class HubertAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[HubertConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim), contiguous for bmm
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
# Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->Hubert
|
| 563 |
+
class HubertFlashAttention2(HubertAttention):
    """
    Hubert flash attention module. This module inherits from `HubertAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # Flash attention expects (bsz, seq, num_heads, head_dim) — no transpose, unlike `_shape`.
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # HubertFlashAttention2 attention does not support output_attentions
        if output_attentions:
            raise ValueError("HubertFlashAttention2 attention does not support output_attentions")

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, q_len, _ = hidden_states.size()

        # get query proj
        query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0].transpose(1, 2)
            value_states = past_key_value[1].transpose(1, 2)
        elif is_cross_attention:
            # cross_attentions
            key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
            value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
        else:
            # self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
class HubertSdpaAttention(HubertAttention):
    # Copied from transformers.models.bart.modeling_bart.BartSdpaAttention.forward with Bart->Hubert
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        if output_attentions or layer_head_mask is not None:
            # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "HubertModel is using HubertSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention"
                ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states,
                key_value_states=key_value_states,
                past_key_value=past_key_value,
                attention_mask=attention_mask,
                layer_head_mask=layer_head_mask,
                output_attentions=output_attentions,
            )

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        query_states = self._shape(query_states, tgt_len, bsz)

        # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask,
        # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
            is_causal=self.is_causal and attention_mask is None and tgt_len > 1,
        )

        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, None, past_key_value
|
| 882 |
+
|
| 883 |
+
|
| 884 |
+
HUBERT_ATTENTION_CLASSES = {
    # Keys correspond to `config._attn_implementation` values read by the encoder layers.
    "eager": HubertAttention,
    "sdpa": HubertSdpaAttention,
    "flash_attention_2": HubertFlashAttention2,
}
|
| 889 |
+
|
| 890 |
+
|
| 891 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Hubert
|
| 892 |
+
class HubertFeedForward(nn.Module):
    """Position-wise feed-forward block: up-projection, activation, down-projection, with dropout."""

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a string key into ACT2FN or a callable used as-is.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)

        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->Hubert, WAV2VEC2->HUBERT
|
| 917 |
+
class HubertEncoderLayer(nn.Module):
    """Post-norm transformer encoder layer: attention + residual, then LayerNorm, feed-forward + residual, LayerNorm."""

    def __init__(self, config):
        super().__init__()
        # Attention implementation is selected by `config._attn_implementation`.
        self.attention = HUBERT_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )

        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = HubertFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->Hubert
|
| 953 |
+
class HubertAttnAdapterLayer(nn.Module):
    def __init__(self, config):
        """
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        """
        super().__init__()
        self.input_dim = config.adapter_attn_dim
        self.hidden_dim = config.hidden_size

        # Bottleneck: hidden -> adapter dim -> hidden, with a LayerNorm in front.
        self.norm = nn.LayerNorm(self.hidden_dim)
        self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
        self.act_fn = nn.ReLU()
        self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)

    def forward(self, hidden_states: torch.FloatTensor):
        hidden_states = self.norm(hidden_states)

        hidden_states = self.linear_1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.linear_2(hidden_states)

        return hidden_states
|
| 976 |
+
|
| 977 |
+
|
| 978 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert, WAV2VEC2->HUBERT
|
| 979 |
+
class HubertEncoderLayerStableLayerNorm(nn.Module):
    """Pre-norm transformer encoder layer (LayerNorm before attention/feed-forward), with optional adapter."""

    def __init__(self, config):
        super().__init__()
        self.attention = HUBERT_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = HubertFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Optional attention adapter, only built when the config declares `adapter_attn_dim`.
        if getattr(config, "adapter_attn_dim", None) is not None:
            self.adapter_layer = HubertAttnAdapterLayer(config)
        else:
            self.adapter_layer = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ):
        attn_residual = hidden_states
        hidden_states = self.layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))

        if self.adapter_layer is not None:
            hidden_states = hidden_states + self.adapter_layer(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
|
| 1022 |
+
|
| 1023 |
+
|
| 1024 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->Hubert
class HubertEncoder(nn.Module):
    """Transformer encoder stack (post-norm variant) with a convolutional
    positional embedding, LayerDrop, and optional gradient checkpointing."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = HubertPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([HubertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        # Flash Attention 2 consumes a 2d padding mask; other implementations
        # get the additive 4d mask built in forward().
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """Run the full encoder stack.

        Args:
            hidden_states: projected feature frames. NOTE(review): indexing
                ``hidden_states.shape[2]`` below implies a 3d
                ``(batch, seq_len, hidden_size)`` tensor — confirm with callers.
            attention_mask: optional 2d padding mask (1 = attend, 0 = pad).
            output_attentions: collect per-layer attention weights.
            output_hidden_states: collect per-layer hidden states.
            return_dict: return a ``BaseModelOutput`` instead of a tuple.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0
            if self._use_flash_attention_2:
                # 2d mask is passed through the layers; dropped entirely when
                # nothing is actually padded (all-ones mask).
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # extend attention_mask to an additive 4d bias: 0 where
                # attended, dtype-min where masked.
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # Skipped layers still contribute a None attention entry below.
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
|
| 1108 |
+
|
| 1109 |
+
|
| 1110 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
class HubertEncoderStableLayerNorm(nn.Module):
    """Pre-norm ("stable layer norm") encoder stack. Unlike ``HubertEncoder``,
    the final layer norm is applied *after* the layer loop, and each layer
    normalizes its own input."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = HubertPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
        # Flash Attention 2 consumes a 2d padding mask; other implementations
        # get the additive 4d mask built in forward().
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run the pre-norm encoder stack.

        Same contract as ``HubertEncoder.forward``; see that method for
        parameter details. Note there is no layer norm before the layer loop —
        it is applied once after all layers.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens are not attended to
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0
            if self._use_flash_attention_2:
                # 2d mask is passed through the layers; dropped entirely when
                # nothing is actually padded (all-ones mask).
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # extend attention_mask to an additive 4d bias: 0 where
                # attended, dtype-min where masked.
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # Skipped layers still contribute a None attention entry below.
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # Final layer norm — the defining difference of the stable-layer-norm variant.
        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
|
| 1198 |
+
|
| 1199 |
+
|
| 1200 |
+
class HubertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = HubertConfig
    base_model_prefix = "hubert"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # Under ZeRO-3 the parameters are partitioned across ranks, so
                # they must be gathered before the in-place kaiming init.
                if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
                    # weight-norm parametrized conv: gather both components.
                    with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
                else:
                    with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
            else:
                nn.init.kaiming_normal_(module.weight.data)

        if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
            module.bias.data.zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        # Apply the length formula once per conv layer of the feature encoder.
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        """Downsample a sample-level attention mask to frame level, matching the
        feature encoder's output length, and return it as a boolean mask."""
        output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
|
| 1264 |
+
|
| 1265 |
+
|
| 1266 |
+
HUBERT_START_DOCSTRING = r"""
|
| 1267 |
+
Hubert was proposed in [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden
|
| 1268 |
+
Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia,
|
| 1269 |
+
Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
| 1270 |
+
|
| 1271 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 1272 |
+
library implements for all its model (such as downloading or saving etc.).
|
| 1273 |
+
|
| 1274 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
|
| 1275 |
+
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
|
| 1276 |
+
behavior.
|
| 1277 |
+
|
| 1278 |
+
Parameters:
|
| 1279 |
+
config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
|
| 1280 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 1281 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 1282 |
+
"""
|
| 1283 |
+
|
| 1284 |
+
|
| 1285 |
+
HUBERT_INPUTS_DOCSTRING = r"""
|
| 1286 |
+
Args:
|
| 1287 |
+
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
|
| 1288 |
+
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
|
| 1289 |
+
into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
|
| 1290 |
+
soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
|
| 1291 |
+
conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
|
| 1292 |
+
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1293 |
+
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
|
| 1294 |
+
1]`:
|
| 1295 |
+
|
| 1296 |
+
- 1 for tokens that are **not masked**,
|
| 1297 |
+
- 0 for tokens that are **masked**.
|
| 1298 |
+
|
| 1299 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1300 |
+
|
| 1301 |
+
<Tip warning={true}>
|
| 1302 |
+
|
| 1303 |
+
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
|
| 1304 |
+
True`. For all models whose processor has `config.return_attention_mask == False`, such as
|
| 1305 |
+
[hubert-base](https://huggingface.co/facebook/hubert-base-ls960), `attention_mask` should **not** be passed
|
| 1306 |
+
to avoid degraded performance when doing batched inference. For such models `input_values` should simply be
|
| 1307 |
+
padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different
|
| 1308 |
+
results depending on whether `input_values` is padded or not.
|
| 1309 |
+
|
| 1310 |
+
</Tip>
|
| 1311 |
+
|
| 1312 |
+
output_attentions (`bool`, *optional*):
|
| 1313 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 1314 |
+
tensors for more detail.
|
| 1315 |
+
output_hidden_states (`bool`, *optional*):
|
| 1316 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1317 |
+
more detail.
|
| 1318 |
+
return_dict (`bool`, *optional*):
|
| 1319 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 1320 |
+
"""
|
| 1321 |
+
|
| 1322 |
+
|
| 1323 |
+
@add_start_docstrings(
    "The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.",
    HUBERT_START_DOCSTRING,
)
class HubertModel(HubertPreTrainedModel):
    """Bare HuBERT backbone: conv feature encoder -> feature projection ->
    (optional SpecAugment masking) -> transformer encoder."""

    def __init__(self, config: HubertConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = HubertFeatureEncoder(config)
        self.feature_projection = HubertFeatureProjection(config)

        # The learned mask embedding is only needed when SpecAugment masking
        # can actually be applied.
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())

        if config.do_stable_layer_norm:
            self.encoder = HubertEncoderStableLayerNorm(config)
        else:
            self.encoder = HubertEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        """

        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states

        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)

        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            # Broadcast the per-feature mask over the whole time axis and zero
            # the masked feature channels.
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0

        return hidden_states

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        """

        Returns:

        Example:

        ```python
        >>> from transformers import AutoProcessor, HubertModel
        >>> from datasets import load_dataset
        >>> import soundfile as sf

        >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
        >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")


        >>> def map_to_array(batch):
        ...     speech, _ = sf.read(batch["file"])
        ...     batch["speech"] = speech
        ...     return batch


        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)

        >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values  # Batch size 1
        >>> hidden_states = model(input_values).last_hidden_state
        ```"""
        # Fall back to config defaults for any unspecified output options.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Conv feature encoder outputs (batch, hidden, frames); transpose to
        # (batch, frames, hidden) for the transformer.
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)

        hidden_states = self.feature_projection(extract_features)
        # NOTE(review): attention_mask is not forwarded to _mask_hidden_states
        # here (unlike the wav2vec2 counterpart) — confirm this is intentional.
        hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]

        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 1464 |
+
|
| 1465 |
+
|
| 1466 |
+
@add_start_docstrings(
    """Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    HUBERT_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
class HubertForCTC(HubertPreTrainedModel):
    """HuBERT backbone plus a linear CTC head over the vocabulary."""

    def __init__(self, config, target_lang: Optional[str] = None):
        """
        Args:
            config: model configuration; `config.vocab_size` must be set.
            target_lang: optional adapter language id, consumed by
                `tie_weights` via `load_adapter`.

        Raises:
            ValueError: if `config.vocab_size` is None.
        """
        super().__init__(config)

        self.hubert = HubertModel(config)
        self.dropout = nn.Dropout(config.final_dropout)

        self.target_lang = target_lang

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        # When an adapter is used, the backbone's output width differs from hidden_size.
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """

        # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
        # correctly load adapter layers for Hubert so that we do not have to introduce a new API to
        # [`PreTrainedModel`]. While slightly hacky, Hubert never has to tie input and output embeddings, so that it is
        # ok to repurpose this function here.
        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        # Deprecated alias of freeze_feature_encoder.
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.hubert.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.hubert.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.hubert(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)

            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            # cuDNN's CTC kernel is disabled for determinism/compatibility.
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
|
| 1621 |
+
|
| 1622 |
+
|
| 1623 |
+
@add_start_docstrings(
|
| 1624 |
+
"""
|
| 1625 |
+
Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
|
| 1626 |
+
SUPERB Keyword Spotting.
|
| 1627 |
+
""",
|
| 1628 |
+
HUBERT_START_DOCSTRING,
|
| 1629 |
+
)
|
| 1630 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
|
| 1631 |
+
class HubertForSequenceClassification(HubertPreTrainedModel):
|
| 1632 |
+
    def __init__(self, config):
        """Build the sequence classifier: HuBERT backbone, projector, linear head.

        Raises:
            ValueError: if `config.add_adapter` is set — adapters are not
                supported for sequence classification.
        """
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)"
            )
        self.hubert = HubertModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            # One learned scalar weight per hidden state, initialized uniform.
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()
|
| 1648 |
+
|
| 1649 |
+
def freeze_feature_extractor(self):
|
| 1650 |
+
"""
|
| 1651 |
+
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
|
| 1652 |
+
not be updated during training.
|
| 1653 |
+
"""
|
| 1654 |
+
warnings.warn(
|
| 1655 |
+
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
|
| 1656 |
+
"Please use the equivalent `freeze_feature_encoder` method instead.",
|
| 1657 |
+
FutureWarning,
|
| 1658 |
+
)
|
| 1659 |
+
self.freeze_feature_encoder()
|
| 1660 |
+
|
| 1661 |
+
def freeze_feature_encoder(self):
|
| 1662 |
+
"""
|
| 1663 |
+
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
|
| 1664 |
+
not be updated during training.
|
| 1665 |
+
"""
|
| 1666 |
+
self.hubert.feature_extractor._freeze_parameters()
|
| 1667 |
+
|
| 1668 |
+
def freeze_base_model(self):
|
| 1669 |
+
"""
|
| 1670 |
+
Calling this function will disable the gradient computation for the base model so that its parameters will not
|
| 1671 |
+
be updated during training. Only the classification head will be updated.
|
| 1672 |
+
"""
|
| 1673 |
+
for param in self.hubert.parameters():
|
| 1674 |
+
param.requires_grad = False
|
| 1675 |
+
|
| 1676 |
+
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
|
| 1677 |
+
@add_code_sample_docstrings(
|
| 1678 |
+
checkpoint=_SEQ_CLASS_CHECKPOINT,
|
| 1679 |
+
output_type=SequenceClassifierOutput,
|
| 1680 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1681 |
+
modality="audio",
|
| 1682 |
+
expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
|
| 1683 |
+
expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
|
| 1684 |
+
)
|
| 1685 |
+
def forward(
|
| 1686 |
+
self,
|
| 1687 |
+
input_values: Optional[torch.Tensor],
|
| 1688 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1689 |
+
output_attentions: Optional[bool] = None,
|
| 1690 |
+
output_hidden_states: Optional[bool] = None,
|
| 1691 |
+
return_dict: Optional[bool] = None,
|
| 1692 |
+
labels: Optional[torch.Tensor] = None,
|
| 1693 |
+
) -> Union[Tuple, SequenceClassifierOutput]:
|
| 1694 |
+
r"""
|
| 1695 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1696 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1697 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1698 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1699 |
+
"""
|
| 1700 |
+
|
| 1701 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1702 |
+
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
|
| 1703 |
+
|
| 1704 |
+
outputs = self.hubert(
|
| 1705 |
+
input_values,
|
| 1706 |
+
attention_mask=attention_mask,
|
| 1707 |
+
output_attentions=output_attentions,
|
| 1708 |
+
output_hidden_states=output_hidden_states,
|
| 1709 |
+
return_dict=return_dict,
|
| 1710 |
+
)
|
| 1711 |
+
|
| 1712 |
+
if self.config.use_weighted_layer_sum:
|
| 1713 |
+
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
|
| 1714 |
+
hidden_states = torch.stack(hidden_states, dim=1)
|
| 1715 |
+
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
|
| 1716 |
+
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
|
| 1717 |
+
else:
|
| 1718 |
+
hidden_states = outputs[0]
|
| 1719 |
+
|
| 1720 |
+
hidden_states = self.projector(hidden_states)
|
| 1721 |
+
if attention_mask is None:
|
| 1722 |
+
pooled_output = hidden_states.mean(dim=1)
|
| 1723 |
+
else:
|
| 1724 |
+
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
|
| 1725 |
+
hidden_states[~padding_mask] = 0.0
|
| 1726 |
+
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
|
| 1727 |
+
|
| 1728 |
+
logits = self.classifier(pooled_output)
|
| 1729 |
+
|
| 1730 |
+
loss = None
|
| 1731 |
+
if labels is not None:
|
| 1732 |
+
loss_fct = CrossEntropyLoss()
|
| 1733 |
+
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
|
| 1734 |
+
|
| 1735 |
+
if not return_dict:
|
| 1736 |
+
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
|
| 1737 |
+
return ((loss,) + output) if loss is not None else output
|
| 1738 |
+
|
| 1739 |
+
return SequenceClassifierOutput(
|
| 1740 |
+
loss=loss,
|
| 1741 |
+
logits=logits,
|
| 1742 |
+
hidden_states=outputs.hidden_states,
|
| 1743 |
+
attentions=outputs.attentions,
|
| 1744 |
+
)
|
parrot/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py
ADDED
|
@@ -0,0 +1,1673 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" TensorFlow Hubert model."""
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
|
| 19 |
+
import warnings
|
| 20 |
+
from typing import Any, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import tensorflow as tf
|
| 24 |
+
|
| 25 |
+
from ...activations_tf import get_tf_activation
|
| 26 |
+
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
|
| 27 |
+
from ...modeling_tf_utils import (
|
| 28 |
+
TFPreTrainedModel,
|
| 29 |
+
get_initializer,
|
| 30 |
+
keras,
|
| 31 |
+
keras_serializable,
|
| 32 |
+
unpack_inputs,
|
| 33 |
+
)
|
| 34 |
+
from ...tf_utils import shape_list, stable_softmax
|
| 35 |
+
from ...utils import (
|
| 36 |
+
add_start_docstrings,
|
| 37 |
+
add_start_docstrings_to_model_forward,
|
| 38 |
+
logging,
|
| 39 |
+
replace_return_docstrings,
|
| 40 |
+
)
|
| 41 |
+
from .configuration_hubert import HubertConfig
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
logger = logging.get_logger(__name__)
|
| 45 |
+
|
| 46 |
+
_CONFIG_FOR_DOC = "HubertConfig"
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
LARGE_NEGATIVE = -1e8
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
|
| 53 |
+
def _sample_without_replacement(distribution, num_samples):
|
| 54 |
+
"""
|
| 55 |
+
Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
|
| 56 |
+
https://github.com/tensorflow/tensorflow/issues/9260 for more info
|
| 57 |
+
"""
|
| 58 |
+
z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
|
| 59 |
+
_, indices = tf.nn.top_k(distribution + z, num_samples)
|
| 60 |
+
return indices
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
|
| 64 |
+
def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
|
| 65 |
+
"""
|
| 66 |
+
Scatter function as in PyTorch with indices in format (batch_dim, indixes)
|
| 67 |
+
"""
|
| 68 |
+
indices_shape = shape_list(batch_indices)
|
| 69 |
+
# broadcast batch dim to indices_shape
|
| 70 |
+
broad_casted_batch_dims = tf.reshape(
|
| 71 |
+
tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
|
| 72 |
+
)
|
| 73 |
+
# transform batch_indices to pair_indices
|
| 74 |
+
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
|
| 75 |
+
# scatter values to pair indices
|
| 76 |
+
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
|
| 80 |
+
def _compute_mask_indices(
|
| 81 |
+
shape: Tuple[int, int],
|
| 82 |
+
mask_prob: float,
|
| 83 |
+
mask_length: int,
|
| 84 |
+
min_masks: int = 0,
|
| 85 |
+
) -> tf.Tensor:
|
| 86 |
+
"""
|
| 87 |
+
Computes random mask spans for a given shape
|
| 88 |
+
|
| 89 |
+
Args:
|
| 90 |
+
shape: the shape for which to compute masks.
|
| 91 |
+
should be of size 2 where first element is batch size and 2nd is timesteps
|
| 92 |
+
attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
|
| 93 |
+
mask_prob:
|
| 94 |
+
probability for each token to be chosen as start of the span to be masked. this will be multiplied by
|
| 95 |
+
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
|
| 96 |
+
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
|
| 97 |
+
mask_length: size of the mask
|
| 98 |
+
min_masks: minimum number of masked spans
|
| 99 |
+
|
| 100 |
+
Adapted from [fairseq's
|
| 101 |
+
data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
|
| 102 |
+
"""
|
| 103 |
+
batch_size, sequence_length = shape
|
| 104 |
+
|
| 105 |
+
if mask_length < 1:
|
| 106 |
+
raise ValueError("`mask_length` has to be bigger than 0.")
|
| 107 |
+
|
| 108 |
+
tf.debugging.assert_less(
|
| 109 |
+
mask_length,
|
| 110 |
+
sequence_length,
|
| 111 |
+
message=(
|
| 112 |
+
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and"
|
| 113 |
+
f" `sequence_length`: {sequence_length}`"
|
| 114 |
+
),
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
# compute number of masked spans in batch
|
| 118 |
+
num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,))
|
| 119 |
+
num_masked_spans = tf.maximum(num_masked_spans, min_masks)
|
| 120 |
+
num_masked_spans = tf.cast(num_masked_spans, tf.int32)
|
| 121 |
+
|
| 122 |
+
# make sure num masked indices <= sequence_length
|
| 123 |
+
num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans)
|
| 124 |
+
num_masked_spans = tf.squeeze(num_masked_spans)
|
| 125 |
+
|
| 126 |
+
# SpecAugment mask to fill
|
| 127 |
+
spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
|
| 128 |
+
|
| 129 |
+
# uniform distribution to sample from, make sure that offset samples are < sequence_length
|
| 130 |
+
uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
|
| 131 |
+
|
| 132 |
+
# get random indices to mask
|
| 133 |
+
spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
|
| 134 |
+
|
| 135 |
+
# expand masked indices to masked spans
|
| 136 |
+
spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
|
| 137 |
+
spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
|
| 138 |
+
spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
|
| 139 |
+
|
| 140 |
+
offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
|
| 141 |
+
offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
|
| 142 |
+
offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
|
| 143 |
+
|
| 144 |
+
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
|
| 145 |
+
|
| 146 |
+
# scatter indices to mask
|
| 147 |
+
spec_aug_mask = _scatter_values_on_batch_indices(
|
| 148 |
+
tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask)
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
return spec_aug_mask
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
|
| 155 |
+
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
|
| 156 |
+
"""
|
| 157 |
+
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
| 158 |
+
"""
|
| 159 |
+
src_len = shape_list(mask)[1]
|
| 160 |
+
tgt_len = tgt_len if tgt_len is not None else src_len
|
| 161 |
+
one_cst = tf.constant(1.0)
|
| 162 |
+
mask = tf.cast(mask, dtype=one_cst.dtype)
|
| 163 |
+
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
|
| 164 |
+
|
| 165 |
+
return (one_cst - expanded_mask) * LARGE_NEGATIVE
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
|
| 169 |
+
class TFHubertGroupNorm(keras.layers.Layer):
|
| 170 |
+
"""
|
| 171 |
+
From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
|
| 172 |
+
"""
|
| 173 |
+
|
| 174 |
+
def __init__(
|
| 175 |
+
self,
|
| 176 |
+
groups: int = 32,
|
| 177 |
+
axis: int = -1,
|
| 178 |
+
epsilon: float = 1e-3,
|
| 179 |
+
center: bool = True,
|
| 180 |
+
scale: bool = True,
|
| 181 |
+
beta_initializer: keras.initializers.Initializer = "zeros",
|
| 182 |
+
gamma_initializer: keras.initializers.Initializer = "ones",
|
| 183 |
+
beta_regularizer: keras.regularizers.Regularizer = None,
|
| 184 |
+
gamma_regularizer: keras.regularizers.Regularizer = None,
|
| 185 |
+
beta_constraint: keras.constraints.Constraint = None,
|
| 186 |
+
gamma_constraint: keras.constraints.Constraint = None,
|
| 187 |
+
**kwargs,
|
| 188 |
+
):
|
| 189 |
+
super().__init__(**kwargs)
|
| 190 |
+
self.supports_masking = True
|
| 191 |
+
self.groups = groups
|
| 192 |
+
self.axis = axis
|
| 193 |
+
self.epsilon = epsilon
|
| 194 |
+
self.center = center
|
| 195 |
+
self.scale = scale
|
| 196 |
+
self.beta_initializer = keras.initializers.get(beta_initializer)
|
| 197 |
+
self.gamma_initializer = keras.initializers.get(gamma_initializer)
|
| 198 |
+
self.beta_regularizer = keras.regularizers.get(beta_regularizer)
|
| 199 |
+
self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
|
| 200 |
+
self.beta_constraint = keras.constraints.get(beta_constraint)
|
| 201 |
+
self.gamma_constraint = keras.constraints.get(gamma_constraint)
|
| 202 |
+
self._check_axis()
|
| 203 |
+
|
| 204 |
+
def build(self, input_shape):
|
| 205 |
+
self._check_if_input_shape_is_none(input_shape)
|
| 206 |
+
self._set_number_of_groups_for_instance_norm(input_shape)
|
| 207 |
+
self._check_size_of_dimensions(input_shape)
|
| 208 |
+
self._create_input_spec(input_shape)
|
| 209 |
+
|
| 210 |
+
self._add_gamma_weight(input_shape)
|
| 211 |
+
self._add_beta_weight(input_shape)
|
| 212 |
+
self.built = True
|
| 213 |
+
super().build(input_shape)
|
| 214 |
+
|
| 215 |
+
def call(self, inputs):
|
| 216 |
+
input_shape = keras.backend.int_shape(inputs)
|
| 217 |
+
tensor_input_shape = tf.shape(inputs)
|
| 218 |
+
|
| 219 |
+
reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
|
| 220 |
+
|
| 221 |
+
normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
|
| 222 |
+
|
| 223 |
+
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
|
| 224 |
+
if not is_instance_norm:
|
| 225 |
+
outputs = tf.reshape(normalized_inputs, tensor_input_shape)
|
| 226 |
+
else:
|
| 227 |
+
outputs = normalized_inputs
|
| 228 |
+
|
| 229 |
+
return outputs
|
| 230 |
+
|
| 231 |
+
def get_config(self):
|
| 232 |
+
config = {
|
| 233 |
+
"groups": self.groups,
|
| 234 |
+
"axis": self.axis,
|
| 235 |
+
"epsilon": self.epsilon,
|
| 236 |
+
"center": self.center,
|
| 237 |
+
"scale": self.scale,
|
| 238 |
+
"beta_initializer": keras.initializers.serialize(self.beta_initializer),
|
| 239 |
+
"gamma_initializer": keras.initializers.serialize(self.gamma_initializer),
|
| 240 |
+
"beta_regularizer": keras.regularizers.serialize(self.beta_regularizer),
|
| 241 |
+
"gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer),
|
| 242 |
+
"beta_constraint": keras.constraints.serialize(self.beta_constraint),
|
| 243 |
+
"gamma_constraint": keras.constraints.serialize(self.gamma_constraint),
|
| 244 |
+
}
|
| 245 |
+
base_config = super().get_config()
|
| 246 |
+
return {**base_config, **config}
|
| 247 |
+
|
| 248 |
+
def compute_output_shape(self, input_shape):
|
| 249 |
+
return input_shape
|
| 250 |
+
|
| 251 |
+
def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
|
| 252 |
+
group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
|
| 253 |
+
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
|
| 254 |
+
if not is_instance_norm:
|
| 255 |
+
group_shape[self.axis] = input_shape[self.axis] // self.groups
|
| 256 |
+
group_shape.insert(self.axis, self.groups)
|
| 257 |
+
group_shape = tf.stack(group_shape)
|
| 258 |
+
reshaped_inputs = tf.reshape(inputs, group_shape)
|
| 259 |
+
return reshaped_inputs, group_shape
|
| 260 |
+
else:
|
| 261 |
+
return inputs, group_shape
|
| 262 |
+
|
| 263 |
+
def _apply_normalization(self, reshaped_inputs, input_shape):
|
| 264 |
+
group_shape = keras.backend.int_shape(reshaped_inputs)
|
| 265 |
+
group_reduction_axes = list(range(1, len(group_shape)))
|
| 266 |
+
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
|
| 267 |
+
if not is_instance_norm:
|
| 268 |
+
axis = -2 if self.axis == -1 else self.axis - 1
|
| 269 |
+
else:
|
| 270 |
+
axis = -1 if self.axis == -1 else self.axis - 1
|
| 271 |
+
group_reduction_axes.pop(axis)
|
| 272 |
+
|
| 273 |
+
mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
|
| 274 |
+
|
| 275 |
+
gamma, beta = self._get_reshaped_weights(input_shape)
|
| 276 |
+
normalized_inputs = tf.nn.batch_normalization(
|
| 277 |
+
reshaped_inputs,
|
| 278 |
+
mean=mean,
|
| 279 |
+
variance=variance,
|
| 280 |
+
scale=gamma,
|
| 281 |
+
offset=beta,
|
| 282 |
+
variance_epsilon=self.epsilon,
|
| 283 |
+
)
|
| 284 |
+
return normalized_inputs
|
| 285 |
+
|
| 286 |
+
def _get_reshaped_weights(self, input_shape):
|
| 287 |
+
broadcast_shape = self._create_broadcast_shape(input_shape)
|
| 288 |
+
gamma = None
|
| 289 |
+
beta = None
|
| 290 |
+
if self.scale:
|
| 291 |
+
gamma = tf.reshape(self.gamma, broadcast_shape)
|
| 292 |
+
|
| 293 |
+
if self.center:
|
| 294 |
+
beta = tf.reshape(self.beta, broadcast_shape)
|
| 295 |
+
return gamma, beta
|
| 296 |
+
|
| 297 |
+
def _check_if_input_shape_is_none(self, input_shape):
|
| 298 |
+
dim = input_shape[self.axis]
|
| 299 |
+
if dim is None:
|
| 300 |
+
raise ValueError(
|
| 301 |
+
"Axis "
|
| 302 |
+
+ str(self.axis)
|
| 303 |
+
+ " of input tensor should have a defined dimension but the layer received an input with shape "
|
| 304 |
+
+ str(input_shape)
|
| 305 |
+
+ "."
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
def _set_number_of_groups_for_instance_norm(self, input_shape):
|
| 309 |
+
dim = input_shape[self.axis]
|
| 310 |
+
|
| 311 |
+
if self.groups == -1:
|
| 312 |
+
self.groups = dim
|
| 313 |
+
|
| 314 |
+
def _check_size_of_dimensions(self, input_shape):
|
| 315 |
+
dim = input_shape[self.axis]
|
| 316 |
+
if dim < self.groups:
|
| 317 |
+
raise ValueError(
|
| 318 |
+
"Number of groups ("
|
| 319 |
+
+ str(self.groups)
|
| 320 |
+
+ ") cannot be more than the number of channels ("
|
| 321 |
+
+ str(dim)
|
| 322 |
+
+ ")."
|
| 323 |
+
)
|
| 324 |
+
|
| 325 |
+
if dim % self.groups != 0:
|
| 326 |
+
raise ValueError(
|
| 327 |
+
"Number of groups ("
|
| 328 |
+
+ str(self.groups)
|
| 329 |
+
+ ") must be a multiple of the number of channels ("
|
| 330 |
+
+ str(dim)
|
| 331 |
+
+ ")."
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
def _check_axis(self):
|
| 335 |
+
if self.axis == 0:
|
| 336 |
+
raise ValueError(
|
| 337 |
+
"You are trying to normalize your batch axis. Do you want to use tf.layer.batch_normalization instead"
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
def _create_input_spec(self, input_shape):
|
| 341 |
+
dim = input_shape[self.axis]
|
| 342 |
+
self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
|
| 343 |
+
|
| 344 |
+
def _add_gamma_weight(self, input_shape):
|
| 345 |
+
dim = input_shape[self.axis]
|
| 346 |
+
shape = (dim,)
|
| 347 |
+
|
| 348 |
+
if self.scale:
|
| 349 |
+
self.gamma = self.add_weight(
|
| 350 |
+
shape=shape,
|
| 351 |
+
name="gamma",
|
| 352 |
+
initializer=self.gamma_initializer,
|
| 353 |
+
regularizer=self.gamma_regularizer,
|
| 354 |
+
constraint=self.gamma_constraint,
|
| 355 |
+
)
|
| 356 |
+
else:
|
| 357 |
+
self.gamma = None
|
| 358 |
+
|
| 359 |
+
def _add_beta_weight(self, input_shape):
|
| 360 |
+
dim = input_shape[self.axis]
|
| 361 |
+
shape = (dim,)
|
| 362 |
+
|
| 363 |
+
if self.center:
|
| 364 |
+
self.beta = self.add_weight(
|
| 365 |
+
shape=shape,
|
| 366 |
+
name="beta",
|
| 367 |
+
initializer=self.beta_initializer,
|
| 368 |
+
regularizer=self.beta_regularizer,
|
| 369 |
+
constraint=self.beta_constraint,
|
| 370 |
+
)
|
| 371 |
+
else:
|
| 372 |
+
self.beta = None
|
| 373 |
+
|
| 374 |
+
def _create_broadcast_shape(self, input_shape):
|
| 375 |
+
broadcast_shape = [1] * len(input_shape)
|
| 376 |
+
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
|
| 377 |
+
if not is_instance_norm:
|
| 378 |
+
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
|
| 379 |
+
broadcast_shape.insert(self.axis, self.groups)
|
| 380 |
+
else:
|
| 381 |
+
broadcast_shape[self.axis] = self.groups
|
| 382 |
+
return broadcast_shape
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
class TFHubertWeightNormConv1D(keras.layers.Conv1D):
    """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""

    def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
        # padding="valid" because the symmetric padding of `explicit_padding` frames
        # is applied manually in call() via tf.pad.
        super().__init__(
            filters=filters,
            kernel_size=kernel_size,
            groups=groups,
            padding="valid",
            use_bias=True,
            bias_initializer="he_normal",
            **kwargs,
        )
        self.explicit_padding = explicit_padding
        # Axis of the transposed kernel indexing output filters; the weight norm is
        # reduced over the remaining two axes (kernel_norm_axes).
        self.filter_axis = 2
        self.kernel_norm_axes = tf.constant([0, 1])

    def _init_norm(self):
        """Set the norm of the weight vector."""
        # One scalar norm per output filter, reshaped so it broadcasts over weight_v.
        kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
        self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])

    def _normalize_kernel(self):
        """Generate normalized weights."""
        # kernel = g * v / ||v||, computed in the transposed layout and transposed back
        # into the layout Conv1D expects.
        kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
        self.kernel = tf.transpose(kernel)

    def build(self, input_shape):
        if not self.built:
            super().build(input_shape)

            # Re-register the Conv1D kernel (transposed) as the weight-norm direction
            # variable `weight_v`; both names alias the same tf.Variable.
            self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
            self.weight_v = self.kernel

            # Per-filter magnitude `weight_g`, immediately initialized to the current
            # norm of `weight_v` by _init_norm().
            self.weight_g = self.add_weight(
                name="weight_g",
                shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
                initializer="ones",
                dtype=self.weight_v.dtype,
                trainable=True,
            )
            self._init_norm()
            self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)

    def call(self, inputs):
        # TODO Matt: Assigning to attributes in call() is deeply sinful in TensorFlow, as it should be idempotent.
        # This whole layer should be replaced by a layer that doesn't inherit from Conv1D, but instead calls
        # a functional 1d convolution with normalized weights that it generates (but does not store!)
        self._normalize_kernel()

        padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
        output = super().call(padded_inputs)

        return output
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertNoLayerNormConvLayer(keras.layers.Layer):
    """1D convolution followed by an activation, without any normalization."""

    def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Layer 0 consumes the raw waveform, which has a single input channel.
        self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = keras.layers.Conv1D(
            filters=self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            strides=config.conv_stride[layer_id],
            use_bias=config.conv_bias,
            name="conv",
        )
        self.activation = get_tf_activation(config.feat_extract_activation)

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.activation(self.conv(hidden_states))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        conv = getattr(self, "conv", None)
        if conv is not None:
            with tf.name_scope(conv.name):
                conv.build([None, None, self.in_conv_dim])
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertLayerNormConvLayer(keras.layers.Layer):
    """1D convolution followed by layer normalization and an activation."""

    def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Layer 0 consumes the raw waveform, which has a single input channel.
        self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = keras.layers.Conv1D(
            filters=self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            strides=config.conv_stride[layer_id],
            use_bias=config.conv_bias,
            name="conv",
        )
        self.layer_norm = keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
        self.activation = get_tf_activation(config.feat_extract_activation)

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.activation(self.layer_norm(self.conv(hidden_states)))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        conv = getattr(self, "conv", None)
        if conv is not None:
            with tf.name_scope(conv.name):
                conv.build([None, None, self.in_conv_dim])
        layer_norm = getattr(self, "layer_norm", None)
        if layer_norm is not None:
            with tf.name_scope(layer_norm.name):
                layer_norm.build([None, None, self.out_conv_dim])
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
class TFHubertGroupNormConvLayer(keras.layers.Layer):
    """1D convolution followed by group normalization and an activation."""

    def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Layer 0 consumes the raw waveform, which has a single input channel.
        self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = keras.layers.Conv1D(
            filters=self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            strides=config.conv_stride[layer_id],
            use_bias=config.conv_bias,
            name="conv",
        )
        self.activation = get_tf_activation(config.feat_extract_activation)
        # groups == out_conv_dim, i.e. one channel per group (instance-norm style).
        self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm")

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.activation(self.layer_norm(self.conv(hidden_states)))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        conv = getattr(self, "conv", None)
        if conv is not None:
            with tf.name_scope(conv.name):
                conv.build([None, None, self.in_conv_dim])
        layer_norm = getattr(self, "layer_norm", None)
        if layer_norm is not None:
            with tf.name_scope(layer_norm.name):
                layer_norm.build([None, None, self.out_conv_dim])
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
class TFHubertPositionalConvEmbedding(keras.layers.Layer):
    """Convolutional positional embedding: grouped weight-normalized conv + padding trim + activation."""

    def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.conv = TFHubertWeightNormConv1D(
            filters=config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            groups=config.num_conv_pos_embedding_groups,
            explicit_padding=config.num_conv_pos_embeddings // 2,
            name="conv",
        )
        self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = get_tf_activation(config.feat_extract_activation)
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.activation(self.padding(self.conv(hidden_states)))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        conv = getattr(self, "conv", None)
        if conv is not None:
            with tf.name_scope(conv.name):
                conv.build([None, None, self.config.hidden_size])
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
class TFHubertSamePadLayer(keras.layers.Layer):
    """Trim the extra trailing frame produced by an even-sized positional conv kernel."""

    def __init__(self, num_conv_pos_embeddings, **kwargs):
        super().__init__(**kwargs)
        # Even kernel sizes yield one surplus output frame that must be removed.
        self.num_pad_remove = int(num_conv_pos_embeddings % 2 == 0)

    def call(self, hidden_states):
        trim = self.num_pad_remove
        if trim > 0:
            return hidden_states[:, :-trim, :]
        return hidden_states
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
class TFHubertFeatureEncoder(keras.layers.Layer):
    """Stack of 1D conv layers that turns a raw waveform into feature frames."""

    def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
        super().__init__(**kwargs)

        norm_mode = config.feat_extract_norm
        if norm_mode == "group":
            # Group norm on the first layer only, the rest are un-normalized.
            conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")]
            conv_layers += [
                TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif norm_mode == "layer":
            conv_layers = [
                TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
                for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = conv_layers

    def call(self, input_values):
        # Add a trailing channel axis so the 1D convs see (batch, time, 1).
        hidden_states = tf.expand_dims(input_values, -1)
        for layer in self.conv_layers:
            hidden_states = layer(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        for layer in self.conv_layers:
            with tf.name_scope(layer.name):
                layer.build(None)
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
class TFHubertFeatureExtractor(TFHubertFeatureEncoder):
    """Deprecated alias of `TFHubertFeatureEncoder`; emits a `FutureWarning` on construction.

    Kept only for backward compatibility and slated for removal in Transformers v5.
    """

    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        # Fix: the warning previously said "depreciated" (a finance term) instead of
        # the intended "deprecated".
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
class TFHubertFeatureProjection(keras.layers.Layer):
    """Project extracted conv features into the Transformer hidden size."""

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)

        self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.projection = keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="projection",
        )
        self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout)
        self.config = config

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        projected = self.projection(self.layer_norm(hidden_states))
        return self.dropout(projected, training=training)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Both sublayers consume features of the last conv layer's width.
        last_conv_dim = self.config.conv_dim[-1]
        layer_norm = getattr(self, "layer_norm", None)
        if layer_norm is not None:
            with tf.name_scope(layer_norm.name):
                layer_norm.build([None, None, last_conv_dim])
        projection = getattr(self, "projection", None)
        if projection is not None:
            with tf.name_scope(projection.name):
                projection.build([None, None, last_conv_dim])
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
class TFHubertAttention(keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim

        self.num_heads = num_heads
        self.dropout = keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Queries are pre-scaled by 1/sqrt(head_dim) in call().
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: tf.Tensor | None = None,
        past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
        attention_mask: tf.Tensor | None = None,
        layer_head_mask: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, tf.Tensor | None]:
        """Input shape: Batch x Time x Channel

        Returns `(attn_output, attn_weights, past_key_value)` where `attn_weights`
        is reshaped to (bsz, num_heads, tgt_len, src_len).
        """

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = shape_list(hidden_states)

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Fold the head axis into the batch axis for a single batched matmul.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
        key_states = tf.reshape(key_states, proj_shape)
        value_states = tf.reshape(value_states, proj_shape)

        src_len = shape_list(key_states)[1]
        attn_weights = tf.matmul(query_states, key_states, transpose_b=True)

        tf.debugging.assert_equal(
            shape_list(attn_weights),
            [bsz * self.num_heads, tgt_len, src_len],
            message=(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {shape_list(attn_weights)}"
            ),
        )

        if attention_mask is not None:
            tf.debugging.assert_equal(
                shape_list(attention_mask),
                [bsz, 1, tgt_len, src_len],
                message=(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {shape_list(attention_mask)}"
                ),
            )

            # Mask is additive (large negative values at masked positions).
            attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
            attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_weights = stable_softmax(attn_weights, axis=-1)

        if layer_head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(layer_head_mask),
                [self.num_heads],
                message=(
                    f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
                    f" {shape_list(layer_head_mask)}"
                ),
            )

            # Scale each head's probabilities by its mask value (0 disables a head).
            attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
                attn_weights, (bsz, self.num_heads, tgt_len, src_len)
            )
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_probs = self.dropout(attn_weights, training=training)
        attn_output = tf.matmul(attn_probs, value_states)

        tf.debugging.assert_equal(
            shape_list(attn_output),
            [bsz * self.num_heads, tgt_len, self.head_dim],
            message=(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {shape_list(attn_output)}"
            ),
        )

        # Undo the head/batch fold: back to (bsz, tgt_len, embed_dim).
        attn_output = tf.transpose(
            tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
        )
        attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))

        attn_output = self.out_proj(attn_output)
        # Return the pre-dropout weights, split back per head.
        attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))

        return attn_output, attn_weights, past_key_value

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.embed_dim])
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.embed_dim])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.embed_dim])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.embed_dim])
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
class TFHubertFeedForward(keras.layers.Layer):
    """Position-wise feed-forward block: dense -> activation -> dropout -> dense -> dropout."""

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)

        self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout)

        self.intermediate_dense = keras.layers.Dense(
            units=config.intermediate_size,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="intermediate_dense",
        )
        self.intermediate_act_fn = get_tf_activation(config.hidden_act)

        self.output_dense = keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="output_dense",
        )
        self.output_dropout = keras.layers.Dropout(config.hidden_dropout)
        self.config = config

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        expanded = self.intermediate_act_fn(self.intermediate_dense(hidden_states))
        expanded = self.intermediate_dropout(expanded, training=training)
        return self.output_dropout(self.output_dense(expanded), training=training)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        intermediate = getattr(self, "intermediate_dense", None)
        if intermediate is not None:
            with tf.name_scope(intermediate.name):
                intermediate.build([None, None, self.config.hidden_size])
        output = getattr(self, "output_dense", None)
        if output is not None:
            with tf.name_scope(output.name):
                output.build([None, None, self.config.intermediate_size])
|
| 876 |
+
|
| 877 |
+
|
| 878 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
class TFHubertEncoderLayer(keras.layers.Layer):
    """Post-norm Transformer encoder layer: attention + residual, then FFN + residual."""

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFHubertAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
            name="attention",
        )
        self.dropout = keras.layers.Dropout(config.hidden_dropout)
        self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
        self.config = config

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, training=training
        )
        hidden_states = residual + self.dropout(hidden_states, training=training)

        # Post-norm: normalize after the attention residual, then the FFN residual.
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build in declaration order; norms need the hidden size, the rest infer shapes.
        hidden_shape = [None, None, self.config.hidden_size]
        for sublayer, shape in (
            (getattr(self, "attention", None), None),
            (getattr(self, "layer_norm", None), hidden_shape),
            (getattr(self, "feed_forward", None), None),
            (getattr(self, "final_layer_norm", None), hidden_shape),
        ):
            if sublayer is not None:
                with tf.name_scope(sublayer.name):
                    sublayer.build(shape)
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderLayerStableLayerNorm(keras.layers.Layer):
    """Pre-norm Transformer encoder layer: normalize before attention and before the FFN."""

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFHubertAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
            name="attention",
        )
        self.dropout = keras.layers.Dropout(config.hidden_dropout)
        self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
        self.config = config

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        residual = hidden_states
        # Pre-norm: normalize the attention input, keep the raw residual.
        hidden_states = self.layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, training=training
        )
        hidden_states = residual + self.dropout(hidden_states, training=training)
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build in declaration order; norms need the hidden size, the rest infer shapes.
        hidden_shape = [None, None, self.config.hidden_size]
        for sublayer, shape in (
            (getattr(self, "attention", None), None),
            (getattr(self, "layer_norm", None), hidden_shape),
            (getattr(self, "feed_forward", None), None),
            (getattr(self, "final_layer_norm", None), hidden_shape),
        ):
            if sublayer is not None:
                with tf.name_scope(sublayer.name):
                    sublayer.build(shape)
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
class TFHubertEncoder(keras.layers.Layer):
    """Post-norm Transformer encoder: positional conv embedding, layer norm, dropout,
    then a stack of `TFHubertEncoderLayer`s with LayerDrop during training."""

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
        self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.dropout = keras.layers.Dropout(config.hidden_dropout)
        self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """Run the encoder stack.

        Returns a `TFBaseModelOutput` (or a tuple of the non-None fields when
        `return_dict` is False) with the last hidden state and, optionally, all
        hidden states and attention weights.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # Zero out padded positions before positional embeddings are added, then
            # expand the 2D padding mask to the additive 4D attention-mask format.
            hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
            attention_mask = _expand_mask(attention_mask)
        else:
            # No-op branch kept for symmetry with the masked path.
            attention_mask = None

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            # NOTE: uses numpy RNG, so layer skipping is not controlled by TF seeding.
            dropout_probability = np.random.uniform(0, 1)
            if training and (dropout_probability < self.config.layerdrop):  # skip the layer
                continue

            layer_outputs = layer_module(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "pos_conv_embed", None) is not None:
            with tf.name_scope(self.pos_conv_embed.name):
                self.pos_conv_embed.build(None)
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build([None, None, self.config.hidden_size])
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)
|
| 1075 |
+
|
| 1076 |
+
|
| 1077 |
+
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
|
| 1078 |
+
class TFHubertEncoderStableLayerNorm(keras.layers.Layer):
    """Hubert transformer encoder stack, "stable layer norm" variant.

    Used when ``config.do_stable_layer_norm`` is True. Unlike the post-norm
    encoder, the final ``LayerNormalization`` is applied once after the whole
    stack of layers (see the end of ``call``).
    """

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        # Convolutional positional embeddings, added to the inputs before the stack.
        self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
        self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.dropout = keras.layers.Dropout(config.hidden_dropout)
        self.layer = [
            TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
        ]

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """Run the encoder stack over projected features.

        Args:
            hidden_states: input features; assumed (batch, seq_len, hidden_size) — TODO confirm.
            attention_mask: 1/0 padding mask broadcastable over the feature dim, or None.
            output_attentions: collect per-layer attention tensors.
            output_hidden_states: collect hidden states before each layer and after the final norm.
            return_dict: return a `TFBaseModelOutput` instead of a tuple.
            training: enables dropout and LayerDrop.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # Zero out padded positions, then convert the 1/0 mask into the
            # additive attention-mask format expected by the attention layers.
            hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
            attention_mask = _expand_mask(attention_mask)
        else:
            attention_mask = None

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states, training=training)

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            # NOTE: uses NumPy randomness, so a whole layer is skipped per call,
            # uniformly for the whole batch, only at training time.
            dropout_probability = np.random.uniform(0, 1)
            if training and (dropout_probability < self.config.layerdrop):  # skip the layer
                continue

            layer_outputs = layer_module(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # Final normalization over the whole stack — the defining step of the
        # "stable layer norm" configuration.
        hidden_states = self.layer_norm(hidden_states)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

    def build(self, input_shape=None):
        # Build each sub-layer under its own name scope so its variables are
        # namespaced by the layer name.
        if self.built:
            return
        self.built = True
        if getattr(self, "pos_conv_embed", None) is not None:
            with tf.name_scope(self.pos_conv_embed.name):
                self.pos_conv_embed.build(None)
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build([None, None, self.config.hidden_size])
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
@keras_serializable
class TFHubertMainLayer(keras.layers.Layer):
    """Core Hubert model: CNN feature encoder + feature projection + transformer encoder.

    The encoder flavour (pre-norm ``TFHubertEncoderStableLayerNorm`` vs.
    post-norm ``TFHubertEncoder``) is selected by ``config.do_stable_layer_norm``.
    """

    config_class = HubertConfig

    def __init__(self, config: HubertConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor")
        self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection")

        if config.do_stable_layer_norm:
            self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder")
        else:
            self.encoder = TFHubertEncoder(config, name="encoder")

    def build(self, input_shape=None):
        # FIX: check `self.built` *before* creating weights. Previously
        # `masked_spec_embed` was added unconditionally, so calling build()
        # more than once re-registered the weight. The early return keeps
        # build() idempotent.
        if self.built:
            return
        # Learned embedding written over masked time steps (SpecAugment).
        self.masked_spec_embed = self.add_weight(
            shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
        )
        self.built = True
        if getattr(self, "feature_extractor", None) is not None:
            with tf.name_scope(self.feature_extractor.name):
                self.feature_extractor.build(None)
        if getattr(self, "feature_projection", None) is not None:
            with tf.name_scope(self.feature_projection.name):
                self.feature_projection.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)

    def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return (input_length - kernel_size) // stride + 1

        # Fold the formula through every conv layer of the feature encoder.
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None = None):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        """
        batch_size, sequence_length, hidden_size = shape_list(hidden_states)

        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            # (True entries are replaced by the learned masked_spec_embed)
            hidden_states = tf.where(
                tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
                self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
                hidden_states,
            )

        elif self.config.mask_time_prob > 0:
            # generate indices & apply SpecAugment along time axis
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                min_masks=2,
            )
            hidden_states = tf.where(
                tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
                self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
                hidden_states,
            )

        # apply SpecAugment along feature axis
        if self.config.mask_feature_prob > 0:
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
            )
            # FIX: zero out the *masked* features. The previous
            # `tf.where(mask, hidden_states, 0)` had the branches inverted:
            # in the time-axis masking above, True in the mask marks positions
            # to be replaced, so here True must select zeros, not the inputs.
            hidden_states = tf.where(
                tf.cast(mask_feature_indices[:, tf.newaxis, :], tf.bool),
                tf.zeros_like(hidden_states),
                hidden_states,
            )

        return hidden_states

    @unpack_inputs
    def call(
        self,
        input_values: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
        **kwargs: Any,
    ):
        """Extract features from the raw waveform and run the transformer encoder.

        `token_type_ids`, `position_ids`, `head_mask` and `inputs_embeds` are
        accepted for interface compatibility but not used by this model.
        An optional `mask_time_indices` kwarg selects the time steps to mask
        during training (SpecAugment).
        """
        hidden_states = self.feature_extractor(tf.cast(input_values, tf.float32), training=training)

        if attention_mask is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1))

            # Re-derive a mask at the (downsampled) feature frame rate.
            attention_mask = tf.sequence_mask(
                output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype
            )

        hidden_states = self.feature_projection(hidden_states, training=training)

        mask_time_indices = kwargs.get("mask_time_indices", None)
        if training:
            hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = encoder_outputs[0]

        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]

        return TFBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 1301 |
+
|
| 1302 |
+
|
| 1303 |
+
class TFHubertPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = HubertConfig
    base_model_prefix = "hubert"
    main_input_name = "input_values"

    @property
    def input_signature(self):
        # Serving signature: raw waveform plus the standard mask/type-id inputs.
        signature = {
            "input_values": tf.TensorSpec((None, 16000), tf.float32, name="input_values"),
            "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
            "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
        }
        return signature

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # Warn once at construction: CPU-only training is not supported.
        message = (
            f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
            "to train/fine-tune this model, you need a GPU or a TPU"
        )
        logger.warning(message)
|
| 1327 |
+
|
| 1328 |
+
|
| 1329 |
+
HUBERT_START_DOCSTRING = r"""
|
| 1330 |
+
|
| 1331 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 1332 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 1333 |
+
etc.)
|
| 1334 |
+
|
| 1335 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
| 1336 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
| 1337 |
+
behavior.
|
| 1338 |
+
|
| 1339 |
+
<Tip>
|
| 1340 |
+
|
| 1341 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
| 1342 |
+
|
| 1343 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
| 1344 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
| 1345 |
+
|
| 1346 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
| 1347 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
| 1348 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
| 1349 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
| 1350 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
| 1351 |
+
positional argument:
|
| 1352 |
+
|
| 1353 |
+
- a single Tensor with `input_values` only and nothing else: `model(input_values)`
|
| 1354 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
| 1355 |
+
`model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
|
| 1356 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
| 1357 |
+
`model({"input_values": input_values, "token_type_ids": token_type_ids})`
|
| 1358 |
+
|
| 1359 |
+
Note that when creating models and layers with
|
| 1360 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
| 1361 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
| 1362 |
+
|
| 1363 |
+
</Tip>
|
| 1364 |
+
|
| 1365 |
+
Args:
|
| 1366 |
+
config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
|
| 1367 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 1368 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 1369 |
+
"""
|
| 1370 |
+
|
| 1371 |
+
HUBERT_INPUTS_DOCSTRING = r"""
|
| 1372 |
+
Args:
|
| 1373 |
+
input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
|
| 1374 |
+
Indices of input sequence tokens in the vocabulary.
|
| 1375 |
+
|
| 1376 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
| 1377 |
+
[`PreTrainedTokenizer.encode`] for details.
|
| 1378 |
+
|
| 1379 |
+
[What are input IDs?](../glossary#input-ids)
|
| 1380 |
+
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
|
| 1381 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1382 |
+
|
| 1383 |
+
- 1 for tokens that are **not masked**,
|
| 1384 |
+
- 0 for tokens that are **masked**.
|
| 1385 |
+
|
| 1386 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1387 |
+
token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
|
| 1388 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
| 1389 |
+
1]`:
|
| 1390 |
+
|
| 1391 |
+
- 0 corresponds to a *sentence A* token,
|
| 1392 |
+
- 1 corresponds to a *sentence B* token.
|
| 1393 |
+
|
| 1394 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
| 1395 |
+
position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
|
| 1396 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 1397 |
+
config.max_position_embeddings - 1]`.
|
| 1398 |
+
|
| 1399 |
+
[What are position IDs?](../glossary#position-ids)
|
| 1400 |
+
head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 1401 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 1402 |
+
|
| 1403 |
+
- 1 indicates the head is **not masked**,
|
| 1404 |
+
- 0 indicates the head is **masked**.
|
| 1405 |
+
|
| 1406 |
+
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
|
| 1407 |
+
Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
|
| 1408 |
+
This is useful if you want more control over how to convert `input_values` indices into associated vectors
|
| 1409 |
+
than the model's internal embedding lookup matrix.
|
| 1410 |
+
output_attentions (`bool`, *optional*):
|
| 1411 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 1412 |
+
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
|
| 1413 |
+
config will be used instead.
|
| 1414 |
+
output_hidden_states (`bool`, *optional*):
|
| 1415 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1416 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
| 1417 |
+
used instead.
|
| 1418 |
+
return_dict (`bool`, *optional*):
|
| 1419 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
|
| 1420 |
+
eager mode, in graph mode the value will always be set to True.
|
| 1421 |
+
training (`bool`, *optional*, defaults to `False`):
|
| 1422 |
+
Whether or not to use the model in training mode (some modules like dropout modules have different
|
| 1423 |
+
behaviors between training and evaluation).
|
| 1424 |
+
"""
|
| 1425 |
+
|
| 1426 |
+
|
| 1427 |
+
@add_start_docstrings(
|
| 1428 |
+
"The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.",
|
| 1429 |
+
HUBERT_START_DOCSTRING,
|
| 1430 |
+
)
|
| 1431 |
+
class TFHubertModel(TFHubertPreTrainedModel):
    """Bare Hubert model returning raw encoder hidden states (no task head)."""

    def __init__(self, config: HubertConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config
        self.hubert = TFHubertMainLayer(config, name="hubert")

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    @unpack_inputs
    def call(
        self,
        input_values: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """

        Returns:

        Example:

        ```python
        >>> from transformers import AutoProcessor, TFHubertModel
        >>> from datasets import load_dataset
        >>> import soundfile as sf

        >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
        >>> model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft")


        >>> def map_to_array(batch):
        ...     speech, _ = sf.read(batch["file"])
        ...     batch["speech"] = speech
        ...     return batch


        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)

        >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values  # Batch size 1
        >>> hidden_states = model(input_values).last_hidden_state
        ```"""

        # FIX: fall back to the config value only when the caller passed None.
        # The previous truthiness test (`x if x else config...`) silently
        # overrode an explicit `False` with the config default.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        outputs = self.hubert(
            input_values=input_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        # Delegate building to the main layer under its name scope.
        if self.built:
            return
        self.built = True
        if getattr(self, "hubert", None) is not None:
            with tf.name_scope(self.hubert.name):
                self.hubert.build(None)
|
| 1507 |
+
|
| 1508 |
+
|
| 1509 |
+
@add_start_docstrings(
|
| 1510 |
+
"""TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
|
| 1511 |
+
HUBERT_START_DOCSTRING,
|
| 1512 |
+
)
|
| 1513 |
+
class TFHubertForCTC(TFHubertPreTrainedModel):
    """Hubert model with a linear language-modeling head for CTC (speech recognition)."""

    def __init__(self, config: HubertConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.hubert = TFHubertMainLayer(config, name="hubert")
        self.dropout = keras.layers.Dropout(config.final_dropout)
        self.lm_head = keras.layers.Dense(config.vocab_size, name="lm_head")
        # Head input size depends on whether an adapter changes the hidden size.
        self.output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.hubert.feature_extractor.trainable = False

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
    @unpack_inputs
    def call(
        self,
        input_values: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        labels: tf.Tensor | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Returns:

        Example:

        ```python
        >>> import tensorflow as tf
        >>> from transformers import AutoProcessor, TFHubertForCTC
        >>> from datasets import load_dataset
        >>> import soundfile as sf

        >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
        >>> model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")


        >>> def map_to_array(batch):
        ...     speech, _ = sf.read(batch["file"])
        ...     batch["speech"] = speech
        ...     return batch


        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)

        >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values  # Batch size 1
        >>> logits = model(input_values).logits
        >>> predicted_ids = tf.argmax(logits, axis=-1)

        >>> transcription = processor.decode(predicted_ids[0])

        >>> # compute loss
        >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"

        >>> # Pass the transcription as text to encode labels
        >>> labels = processor(text=target_transcription, return_tensors="tf").input_ids

        >>> loss = model(input_values, labels=labels).loss
        ```"""

        outputs = self.hubert(
            input_values=input_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states, training=training)

        logits = self.lm_head(hidden_states)

        if labels is not None:
            if tf.reduce_max(labels) >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

            attention_mask = (
                attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32)
            )
            # Label frame lengths must be expressed at the feature frame rate.
            input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))

            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = tf.cast(labels >= 0, tf.int32)
            target_lengths = tf.reduce_sum(labels_mask, axis=-1)

            loss = tf.nn.ctc_loss(
                logits=logits,
                labels=labels,
                logit_length=input_lengths,
                label_length=target_lengths,
                blank_index=self.config.pad_token_id,
                logits_time_major=False,
            )

            # The two reduction modes are mutually exclusive, so use elif
            # instead of two independent if-statements; any other value of
            # `ctc_loss_reduction` leaves the per-example losses untouched.
            if self.config.ctc_loss_reduction == "sum":
                loss = tf.reduce_sum(loss)
                loss = tf.reshape(loss, (1,))
            elif self.config.ctc_loss_reduction == "mean":
                loss = tf.reduce_mean(loss)
                loss = tf.reshape(loss, (1,))
        else:
            loss = None

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFCausalLMOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        # Build the backbone and the LM head under their name scopes.
        if self.built:
            return
        self.built = True
        if getattr(self, "hubert", None) is not None:
            with tf.name_scope(self.hubert.name):
                self.hubert.build(None)
        if getattr(self, "lm_head", None) is not None:
            with tf.name_scope(self.lm_head.name):
                self.lm_head.build([None, None, self.output_hidden_size])
|
parrot/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.36 kB). View file
|
|
|