Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_chaining_and_caching.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_datetime.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_getitem.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_loc.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_multiindex.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_partial.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_setitem.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_slice.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_sorted.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_api.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_internals.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_managers.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/test_api.py +86 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py +1422 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py +103 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py +753 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py +606 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py +472 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py +54 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py +175 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py +886 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py +791 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py +743 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py +1252 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot.py +2714 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot_multilevel.py +254 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py +79 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__init__.py +15 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/conftest.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_api.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_case_justify.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_cat.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_extract.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_find_replace.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_get_dummies.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_split_partition.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_string_array.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_strings.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/conftest.py +132 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/test_api.py +198 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/test_case_justify.py +427 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/strings/test_cat.py +427 -0
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (188 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_chaining_and_caching.cpython-310.pyc
ADDED
|
Binary file (2.78 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_datetime.cpython-310.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_getitem.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc
ADDED
|
Binary file (5.82 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc
ADDED
|
Binary file (3.31 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_loc.cpython-310.pyc
ADDED
|
Binary file (29.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_multiindex.cpython-310.pyc
ADDED
|
Binary file (7.88 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_partial.cpython-310.pyc
ADDED
|
Binary file (7.04 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_setitem.cpython-310.pyc
ADDED
|
Binary file (16.9 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_slice.cpython-310.pyc
ADDED
|
Binary file (17.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_sorted.cpython-310.pyc
ADDED
|
Binary file (5.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (178 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_api.cpython-310.pyc
ADDED
|
Binary file (2.38 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_internals.cpython-310.pyc
ADDED
|
Binary file (40.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_managers.cpython-310.pyc
ADDED
|
Binary file (3.07 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/test_api.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for the pseudo-public API implemented in internals/api.py and exposed
|
| 3 |
+
in core.internals
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
from pandas.core import internals
|
| 11 |
+
from pandas.core.internals import api
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_internals_api():
|
| 15 |
+
assert internals.make_block is api.make_block
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def test_namespace():
|
| 19 |
+
# SUBJECT TO CHANGE
|
| 20 |
+
|
| 21 |
+
modules = [
|
| 22 |
+
"blocks",
|
| 23 |
+
"concat",
|
| 24 |
+
"managers",
|
| 25 |
+
"construction",
|
| 26 |
+
"array_manager",
|
| 27 |
+
"base",
|
| 28 |
+
"api",
|
| 29 |
+
"ops",
|
| 30 |
+
]
|
| 31 |
+
expected = [
|
| 32 |
+
"make_block",
|
| 33 |
+
"DataManager",
|
| 34 |
+
"ArrayManager",
|
| 35 |
+
"BlockManager",
|
| 36 |
+
"SingleDataManager",
|
| 37 |
+
"SingleBlockManager",
|
| 38 |
+
"SingleArrayManager",
|
| 39 |
+
"concatenate_managers",
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
result = [x for x in dir(internals) if not x.startswith("__")]
|
| 43 |
+
assert set(result) == set(expected + modules)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@pytest.mark.parametrize(
|
| 47 |
+
"name",
|
| 48 |
+
[
|
| 49 |
+
"NumericBlock",
|
| 50 |
+
"ObjectBlock",
|
| 51 |
+
"Block",
|
| 52 |
+
"ExtensionBlock",
|
| 53 |
+
"DatetimeTZBlock",
|
| 54 |
+
],
|
| 55 |
+
)
|
| 56 |
+
def test_deprecations(name):
|
| 57 |
+
# GH#55139
|
| 58 |
+
msg = f"{name} is deprecated.* Use public APIs instead"
|
| 59 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
| 60 |
+
getattr(internals, name)
|
| 61 |
+
|
| 62 |
+
if name not in ["NumericBlock", "ObjectBlock"]:
|
| 63 |
+
# NumericBlock and ObjectBlock are not in the internals.api namespace
|
| 64 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
| 65 |
+
getattr(api, name)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def test_make_block_2d_with_dti():
|
| 69 |
+
# GH#41168
|
| 70 |
+
dti = pd.date_range("2012", periods=3, tz="UTC")
|
| 71 |
+
blk = api.make_block(dti, placement=[0])
|
| 72 |
+
|
| 73 |
+
assert blk.shape == (1, 3)
|
| 74 |
+
assert blk.values.shape == (1, 3)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_create_block_manager_from_blocks_deprecated():
|
| 78 |
+
# GH#33892
|
| 79 |
+
# If they must, downstream packages should get this from internals.api,
|
| 80 |
+
# not internals.
|
| 81 |
+
msg = (
|
| 82 |
+
"create_block_manager_from_blocks is deprecated and will be "
|
| 83 |
+
"removed in a future version. Use public APIs instead"
|
| 84 |
+
)
|
| 85 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
| 86 |
+
internals.create_block_manager_from_blocks
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py
ADDED
|
@@ -0,0 +1,1422 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import (
|
| 2 |
+
date,
|
| 3 |
+
datetime,
|
| 4 |
+
)
|
| 5 |
+
import itertools
|
| 6 |
+
import re
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
|
| 11 |
+
from pandas._libs.internals import BlockPlacement
|
| 12 |
+
from pandas.compat import IS64
|
| 13 |
+
import pandas.util._test_decorators as td
|
| 14 |
+
|
| 15 |
+
from pandas.core.dtypes.common import is_scalar
|
| 16 |
+
|
| 17 |
+
import pandas as pd
|
| 18 |
+
from pandas import (
|
| 19 |
+
Categorical,
|
| 20 |
+
DataFrame,
|
| 21 |
+
DatetimeIndex,
|
| 22 |
+
Index,
|
| 23 |
+
IntervalIndex,
|
| 24 |
+
Series,
|
| 25 |
+
Timedelta,
|
| 26 |
+
Timestamp,
|
| 27 |
+
period_range,
|
| 28 |
+
)
|
| 29 |
+
import pandas._testing as tm
|
| 30 |
+
import pandas.core.algorithms as algos
|
| 31 |
+
from pandas.core.arrays import (
|
| 32 |
+
DatetimeArray,
|
| 33 |
+
SparseArray,
|
| 34 |
+
TimedeltaArray,
|
| 35 |
+
)
|
| 36 |
+
from pandas.core.internals import (
|
| 37 |
+
BlockManager,
|
| 38 |
+
SingleBlockManager,
|
| 39 |
+
make_block,
|
| 40 |
+
)
|
| 41 |
+
from pandas.core.internals.blocks import (
|
| 42 |
+
ensure_block_shape,
|
| 43 |
+
maybe_coerce_values,
|
| 44 |
+
new_block,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
# this file contains BlockManager specific tests
|
| 48 |
+
# TODO(ArrayManager) factor out interleave_dtype tests
|
| 49 |
+
pytestmark = td.skip_array_manager_invalid_test
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@pytest.fixture(params=[new_block, make_block])
|
| 53 |
+
def block_maker(request):
|
| 54 |
+
"""
|
| 55 |
+
Fixture to test both the internal new_block and pseudo-public make_block.
|
| 56 |
+
"""
|
| 57 |
+
return request.param
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@pytest.fixture
|
| 61 |
+
def mgr():
|
| 62 |
+
return create_mgr(
|
| 63 |
+
"a: f8; b: object; c: f8; d: object; e: f8;"
|
| 64 |
+
"f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;"
|
| 65 |
+
"k: M8[ns, US/Eastern]; l: M8[ns, CET];"
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def assert_block_equal(left, right):
|
| 70 |
+
tm.assert_numpy_array_equal(left.values, right.values)
|
| 71 |
+
assert left.dtype == right.dtype
|
| 72 |
+
assert isinstance(left.mgr_locs, BlockPlacement)
|
| 73 |
+
assert isinstance(right.mgr_locs, BlockPlacement)
|
| 74 |
+
tm.assert_numpy_array_equal(left.mgr_locs.as_array, right.mgr_locs.as_array)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def get_numeric_mat(shape):
|
| 78 |
+
arr = np.arange(shape[0])
|
| 79 |
+
return np.lib.stride_tricks.as_strided(
|
| 80 |
+
x=arr, shape=shape, strides=(arr.itemsize,) + (0,) * (len(shape) - 1)
|
| 81 |
+
).copy()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
N = 10
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def create_block(typestr, placement, item_shape=None, num_offset=0, maker=new_block):
|
| 88 |
+
"""
|
| 89 |
+
Supported typestr:
|
| 90 |
+
|
| 91 |
+
* float, f8, f4, f2
|
| 92 |
+
* int, i8, i4, i2, i1
|
| 93 |
+
* uint, u8, u4, u2, u1
|
| 94 |
+
* complex, c16, c8
|
| 95 |
+
* bool
|
| 96 |
+
* object, string, O
|
| 97 |
+
* datetime, dt, M8[ns], M8[ns, tz]
|
| 98 |
+
* timedelta, td, m8[ns]
|
| 99 |
+
* sparse (SparseArray with fill_value=0.0)
|
| 100 |
+
* sparse_na (SparseArray with fill_value=np.nan)
|
| 101 |
+
* category, category2
|
| 102 |
+
|
| 103 |
+
"""
|
| 104 |
+
placement = BlockPlacement(placement)
|
| 105 |
+
num_items = len(placement)
|
| 106 |
+
|
| 107 |
+
if item_shape is None:
|
| 108 |
+
item_shape = (N,)
|
| 109 |
+
|
| 110 |
+
shape = (num_items,) + item_shape
|
| 111 |
+
|
| 112 |
+
mat = get_numeric_mat(shape)
|
| 113 |
+
|
| 114 |
+
if typestr in (
|
| 115 |
+
"float",
|
| 116 |
+
"f8",
|
| 117 |
+
"f4",
|
| 118 |
+
"f2",
|
| 119 |
+
"int",
|
| 120 |
+
"i8",
|
| 121 |
+
"i4",
|
| 122 |
+
"i2",
|
| 123 |
+
"i1",
|
| 124 |
+
"uint",
|
| 125 |
+
"u8",
|
| 126 |
+
"u4",
|
| 127 |
+
"u2",
|
| 128 |
+
"u1",
|
| 129 |
+
):
|
| 130 |
+
values = mat.astype(typestr) + num_offset
|
| 131 |
+
elif typestr in ("complex", "c16", "c8"):
|
| 132 |
+
values = 1.0j * (mat.astype(typestr) + num_offset)
|
| 133 |
+
elif typestr in ("object", "string", "O"):
|
| 134 |
+
values = np.reshape([f"A{i:d}" for i in mat.ravel() + num_offset], shape)
|
| 135 |
+
elif typestr in ("b", "bool"):
|
| 136 |
+
values = np.ones(shape, dtype=np.bool_)
|
| 137 |
+
elif typestr in ("datetime", "dt", "M8[ns]"):
|
| 138 |
+
values = (mat * 1e9).astype("M8[ns]")
|
| 139 |
+
elif typestr.startswith("M8[ns"):
|
| 140 |
+
# datetime with tz
|
| 141 |
+
m = re.search(r"M8\[ns,\s*(\w+\/?\w*)\]", typestr)
|
| 142 |
+
assert m is not None, f"incompatible typestr -> {typestr}"
|
| 143 |
+
tz = m.groups()[0]
|
| 144 |
+
assert num_items == 1, "must have only 1 num items for a tz-aware"
|
| 145 |
+
values = DatetimeIndex(np.arange(N) * 10**9, tz=tz)._data
|
| 146 |
+
values = ensure_block_shape(values, ndim=len(shape))
|
| 147 |
+
elif typestr in ("timedelta", "td", "m8[ns]"):
|
| 148 |
+
values = (mat * 1).astype("m8[ns]")
|
| 149 |
+
elif typestr in ("category",):
|
| 150 |
+
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
|
| 151 |
+
elif typestr in ("category2",):
|
| 152 |
+
values = Categorical(["a", "a", "a", "a", "b", "b", "c", "c", "c", "d"])
|
| 153 |
+
elif typestr in ("sparse", "sparse_na"):
|
| 154 |
+
if shape[-1] != 10:
|
| 155 |
+
# We also are implicitly assuming this in the category cases above
|
| 156 |
+
raise NotImplementedError
|
| 157 |
+
|
| 158 |
+
assert all(s == 1 for s in shape[:-1])
|
| 159 |
+
if typestr.endswith("_na"):
|
| 160 |
+
fill_value = np.nan
|
| 161 |
+
else:
|
| 162 |
+
fill_value = 0.0
|
| 163 |
+
values = SparseArray(
|
| 164 |
+
[fill_value, fill_value, 1, 2, 3, fill_value, 4, 5, fill_value, 6],
|
| 165 |
+
fill_value=fill_value,
|
| 166 |
+
)
|
| 167 |
+
arr = values.sp_values.view()
|
| 168 |
+
arr += num_offset - 1
|
| 169 |
+
else:
|
| 170 |
+
raise ValueError(f'Unsupported typestr: "{typestr}"')
|
| 171 |
+
|
| 172 |
+
values = maybe_coerce_values(values)
|
| 173 |
+
return maker(values, placement=placement, ndim=len(shape))
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def create_single_mgr(typestr, num_rows=None):
|
| 177 |
+
if num_rows is None:
|
| 178 |
+
num_rows = N
|
| 179 |
+
|
| 180 |
+
return SingleBlockManager(
|
| 181 |
+
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
|
| 182 |
+
Index(np.arange(num_rows)),
|
| 183 |
+
)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def create_mgr(descr, item_shape=None):
|
| 187 |
+
"""
|
| 188 |
+
Construct BlockManager from string description.
|
| 189 |
+
|
| 190 |
+
String description syntax looks similar to np.matrix initializer. It looks
|
| 191 |
+
like this::
|
| 192 |
+
|
| 193 |
+
a,b,c: f8; d,e,f: i8
|
| 194 |
+
|
| 195 |
+
Rules are rather simple:
|
| 196 |
+
|
| 197 |
+
* see list of supported datatypes in `create_block` method
|
| 198 |
+
* components are semicolon-separated
|
| 199 |
+
* each component is `NAME,NAME,NAME: DTYPE_ID`
|
| 200 |
+
* whitespace around colons & semicolons are removed
|
| 201 |
+
* components with same DTYPE_ID are combined into single block
|
| 202 |
+
* to force multiple blocks with same dtype, use '-SUFFIX'::
|
| 203 |
+
|
| 204 |
+
'a:f8-1; b:f8-2; c:f8-foobar'
|
| 205 |
+
|
| 206 |
+
"""
|
| 207 |
+
if item_shape is None:
|
| 208 |
+
item_shape = (N,)
|
| 209 |
+
|
| 210 |
+
offset = 0
|
| 211 |
+
mgr_items = []
|
| 212 |
+
block_placements = {}
|
| 213 |
+
for d in descr.split(";"):
|
| 214 |
+
d = d.strip()
|
| 215 |
+
if not len(d):
|
| 216 |
+
continue
|
| 217 |
+
names, blockstr = d.partition(":")[::2]
|
| 218 |
+
blockstr = blockstr.strip()
|
| 219 |
+
names = names.strip().split(",")
|
| 220 |
+
|
| 221 |
+
mgr_items.extend(names)
|
| 222 |
+
placement = list(np.arange(len(names)) + offset)
|
| 223 |
+
try:
|
| 224 |
+
block_placements[blockstr].extend(placement)
|
| 225 |
+
except KeyError:
|
| 226 |
+
block_placements[blockstr] = placement
|
| 227 |
+
offset += len(names)
|
| 228 |
+
|
| 229 |
+
mgr_items = Index(mgr_items)
|
| 230 |
+
|
| 231 |
+
blocks = []
|
| 232 |
+
num_offset = 0
|
| 233 |
+
for blockstr, placement in block_placements.items():
|
| 234 |
+
typestr = blockstr.split("-")[0]
|
| 235 |
+
blocks.append(
|
| 236 |
+
create_block(
|
| 237 |
+
typestr, placement, item_shape=item_shape, num_offset=num_offset
|
| 238 |
+
)
|
| 239 |
+
)
|
| 240 |
+
num_offset += len(placement)
|
| 241 |
+
|
| 242 |
+
sblocks = sorted(blocks, key=lambda b: b.mgr_locs[0])
|
| 243 |
+
return BlockManager(
|
| 244 |
+
tuple(sblocks),
|
| 245 |
+
[mgr_items] + [Index(np.arange(n)) for n in item_shape],
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@pytest.fixture
|
| 250 |
+
def fblock():
|
| 251 |
+
return create_block("float", [0, 2, 4])
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class TestBlock:
|
| 255 |
+
def test_constructor(self):
|
| 256 |
+
int32block = create_block("i4", [0])
|
| 257 |
+
assert int32block.dtype == np.int32
|
| 258 |
+
|
| 259 |
+
@pytest.mark.parametrize(
|
| 260 |
+
"typ, data",
|
| 261 |
+
[
|
| 262 |
+
["float", [0, 2, 4]],
|
| 263 |
+
["complex", [7]],
|
| 264 |
+
["object", [1, 3]],
|
| 265 |
+
["bool", [5]],
|
| 266 |
+
],
|
| 267 |
+
)
|
| 268 |
+
def test_pickle(self, typ, data):
|
| 269 |
+
blk = create_block(typ, data)
|
| 270 |
+
assert_block_equal(tm.round_trip_pickle(blk), blk)
|
| 271 |
+
|
| 272 |
+
def test_mgr_locs(self, fblock):
|
| 273 |
+
assert isinstance(fblock.mgr_locs, BlockPlacement)
|
| 274 |
+
tm.assert_numpy_array_equal(
|
| 275 |
+
fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.intp)
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
def test_attrs(self, fblock):
|
| 279 |
+
assert fblock.shape == fblock.values.shape
|
| 280 |
+
assert fblock.dtype == fblock.values.dtype
|
| 281 |
+
assert len(fblock) == len(fblock.values)
|
| 282 |
+
|
| 283 |
+
def test_copy(self, fblock):
|
| 284 |
+
cop = fblock.copy()
|
| 285 |
+
assert cop is not fblock
|
| 286 |
+
assert_block_equal(fblock, cop)
|
| 287 |
+
|
| 288 |
+
def test_delete(self, fblock):
|
| 289 |
+
newb = fblock.copy()
|
| 290 |
+
locs = newb.mgr_locs
|
| 291 |
+
nb = newb.delete(0)[0]
|
| 292 |
+
assert newb.mgr_locs is locs
|
| 293 |
+
|
| 294 |
+
assert nb is not newb
|
| 295 |
+
|
| 296 |
+
tm.assert_numpy_array_equal(
|
| 297 |
+
nb.mgr_locs.as_array, np.array([2, 4], dtype=np.intp)
|
| 298 |
+
)
|
| 299 |
+
assert not (newb.values[0] == 1).all()
|
| 300 |
+
assert (nb.values[0] == 1).all()
|
| 301 |
+
|
| 302 |
+
newb = fblock.copy()
|
| 303 |
+
locs = newb.mgr_locs
|
| 304 |
+
nb = newb.delete(1)
|
| 305 |
+
assert len(nb) == 2
|
| 306 |
+
assert newb.mgr_locs is locs
|
| 307 |
+
|
| 308 |
+
tm.assert_numpy_array_equal(
|
| 309 |
+
nb[0].mgr_locs.as_array, np.array([0], dtype=np.intp)
|
| 310 |
+
)
|
| 311 |
+
tm.assert_numpy_array_equal(
|
| 312 |
+
nb[1].mgr_locs.as_array, np.array([4], dtype=np.intp)
|
| 313 |
+
)
|
| 314 |
+
assert not (newb.values[1] == 2).all()
|
| 315 |
+
assert (nb[1].values[0] == 2).all()
|
| 316 |
+
|
| 317 |
+
newb = fblock.copy()
|
| 318 |
+
nb = newb.delete(2)
|
| 319 |
+
assert len(nb) == 1
|
| 320 |
+
tm.assert_numpy_array_equal(
|
| 321 |
+
nb[0].mgr_locs.as_array, np.array([0, 2], dtype=np.intp)
|
| 322 |
+
)
|
| 323 |
+
assert (nb[0].values[1] == 1).all()
|
| 324 |
+
|
| 325 |
+
newb = fblock.copy()
|
| 326 |
+
|
| 327 |
+
with pytest.raises(IndexError, match=None):
|
| 328 |
+
newb.delete(3)
|
| 329 |
+
|
| 330 |
+
def test_delete_datetimelike(self):
|
| 331 |
+
# dont use np.delete on values, as that will coerce from DTA/TDA to ndarray
|
| 332 |
+
arr = np.arange(20, dtype="i8").reshape(5, 4).view("m8[ns]")
|
| 333 |
+
df = DataFrame(arr)
|
| 334 |
+
blk = df._mgr.blocks[0]
|
| 335 |
+
assert isinstance(blk.values, TimedeltaArray)
|
| 336 |
+
|
| 337 |
+
nb = blk.delete(1)
|
| 338 |
+
assert len(nb) == 2
|
| 339 |
+
assert isinstance(nb[0].values, TimedeltaArray)
|
| 340 |
+
assert isinstance(nb[1].values, TimedeltaArray)
|
| 341 |
+
|
| 342 |
+
df = DataFrame(arr.view("M8[ns]"))
|
| 343 |
+
blk = df._mgr.blocks[0]
|
| 344 |
+
assert isinstance(blk.values, DatetimeArray)
|
| 345 |
+
|
| 346 |
+
nb = blk.delete([1, 3])
|
| 347 |
+
assert len(nb) == 2
|
| 348 |
+
assert isinstance(nb[0].values, DatetimeArray)
|
| 349 |
+
assert isinstance(nb[1].values, DatetimeArray)
|
| 350 |
+
|
| 351 |
+
def test_split(self):
|
| 352 |
+
# GH#37799
|
| 353 |
+
values = np.random.default_rng(2).standard_normal((3, 4))
|
| 354 |
+
blk = new_block(values, placement=BlockPlacement([3, 1, 6]), ndim=2)
|
| 355 |
+
result = blk._split()
|
| 356 |
+
|
| 357 |
+
# check that we get views, not copies
|
| 358 |
+
values[:] = -9999
|
| 359 |
+
assert (blk.values == -9999).all()
|
| 360 |
+
|
| 361 |
+
assert len(result) == 3
|
| 362 |
+
expected = [
|
| 363 |
+
new_block(values[[0]], placement=BlockPlacement([3]), ndim=2),
|
| 364 |
+
new_block(values[[1]], placement=BlockPlacement([1]), ndim=2),
|
| 365 |
+
new_block(values[[2]], placement=BlockPlacement([6]), ndim=2),
|
| 366 |
+
]
|
| 367 |
+
for res, exp in zip(result, expected):
|
| 368 |
+
assert_block_equal(res, exp)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
class TestBlockManager:
|
| 372 |
+
def test_attrs(self):
|
| 373 |
+
mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2")
|
| 374 |
+
assert mgr.nblocks == 2
|
| 375 |
+
assert len(mgr) == 6
|
| 376 |
+
|
| 377 |
+
def test_duplicate_ref_loc_failure(self):
|
| 378 |
+
tmp_mgr = create_mgr("a:bool; a: f8")
|
| 379 |
+
|
| 380 |
+
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
|
| 381 |
+
|
| 382 |
+
blocks[0].mgr_locs = BlockPlacement(np.array([0]))
|
| 383 |
+
blocks[1].mgr_locs = BlockPlacement(np.array([0]))
|
| 384 |
+
|
| 385 |
+
# test trying to create block manager with overlapping ref locs
|
| 386 |
+
|
| 387 |
+
msg = "Gaps in blk ref_locs"
|
| 388 |
+
|
| 389 |
+
with pytest.raises(AssertionError, match=msg):
|
| 390 |
+
mgr = BlockManager(blocks, axes)
|
| 391 |
+
mgr._rebuild_blknos_and_blklocs()
|
| 392 |
+
|
| 393 |
+
blocks[0].mgr_locs = BlockPlacement(np.array([0]))
|
| 394 |
+
blocks[1].mgr_locs = BlockPlacement(np.array([1]))
|
| 395 |
+
mgr = BlockManager(blocks, axes)
|
| 396 |
+
mgr.iget(1)
|
| 397 |
+
|
| 398 |
+
def test_pickle(self, mgr):
|
| 399 |
+
mgr2 = tm.round_trip_pickle(mgr)
|
| 400 |
+
tm.assert_frame_equal(
|
| 401 |
+
DataFrame._from_mgr(mgr, axes=mgr.axes),
|
| 402 |
+
DataFrame._from_mgr(mgr2, axes=mgr2.axes),
|
| 403 |
+
)
|
| 404 |
+
|
| 405 |
+
# GH2431
|
| 406 |
+
assert hasattr(mgr2, "_is_consolidated")
|
| 407 |
+
assert hasattr(mgr2, "_known_consolidated")
|
| 408 |
+
|
| 409 |
+
# reset to False on load
|
| 410 |
+
assert not mgr2._is_consolidated
|
| 411 |
+
assert not mgr2._known_consolidated
|
| 412 |
+
|
| 413 |
+
@pytest.mark.parametrize("mgr_string", ["a,a,a:f8", "a: f8; a: i8"])
|
| 414 |
+
def test_non_unique_pickle(self, mgr_string):
|
| 415 |
+
mgr = create_mgr(mgr_string)
|
| 416 |
+
mgr2 = tm.round_trip_pickle(mgr)
|
| 417 |
+
tm.assert_frame_equal(
|
| 418 |
+
DataFrame._from_mgr(mgr, axes=mgr.axes),
|
| 419 |
+
DataFrame._from_mgr(mgr2, axes=mgr2.axes),
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
def test_categorical_block_pickle(self):
|
| 423 |
+
mgr = create_mgr("a: category")
|
| 424 |
+
mgr2 = tm.round_trip_pickle(mgr)
|
| 425 |
+
tm.assert_frame_equal(
|
| 426 |
+
DataFrame._from_mgr(mgr, axes=mgr.axes),
|
| 427 |
+
DataFrame._from_mgr(mgr2, axes=mgr2.axes),
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
smgr = create_single_mgr("category")
|
| 431 |
+
smgr2 = tm.round_trip_pickle(smgr)
|
| 432 |
+
tm.assert_series_equal(
|
| 433 |
+
Series()._constructor_from_mgr(smgr, axes=smgr.axes),
|
| 434 |
+
Series()._constructor_from_mgr(smgr2, axes=smgr2.axes),
|
| 435 |
+
)
|
| 436 |
+
|
| 437 |
+
def test_iget(self):
|
| 438 |
+
cols = Index(list("abc"))
|
| 439 |
+
values = np.random.default_rng(2).random((3, 3))
|
| 440 |
+
block = new_block(
|
| 441 |
+
values=values.copy(),
|
| 442 |
+
placement=BlockPlacement(np.arange(3, dtype=np.intp)),
|
| 443 |
+
ndim=values.ndim,
|
| 444 |
+
)
|
| 445 |
+
mgr = BlockManager(blocks=(block,), axes=[cols, Index(np.arange(3))])
|
| 446 |
+
|
| 447 |
+
tm.assert_almost_equal(mgr.iget(0).internal_values(), values[0])
|
| 448 |
+
tm.assert_almost_equal(mgr.iget(1).internal_values(), values[1])
|
| 449 |
+
tm.assert_almost_equal(mgr.iget(2).internal_values(), values[2])
|
| 450 |
+
|
| 451 |
+
def test_set(self):
|
| 452 |
+
mgr = create_mgr("a,b,c: int", item_shape=(3,))
|
| 453 |
+
|
| 454 |
+
mgr.insert(len(mgr.items), "d", np.array(["foo"] * 3))
|
| 455 |
+
mgr.iset(1, np.array(["bar"] * 3))
|
| 456 |
+
tm.assert_numpy_array_equal(mgr.iget(0).internal_values(), np.array([0] * 3))
|
| 457 |
+
tm.assert_numpy_array_equal(
|
| 458 |
+
mgr.iget(1).internal_values(), np.array(["bar"] * 3, dtype=np.object_)
|
| 459 |
+
)
|
| 460 |
+
tm.assert_numpy_array_equal(mgr.iget(2).internal_values(), np.array([2] * 3))
|
| 461 |
+
tm.assert_numpy_array_equal(
|
| 462 |
+
mgr.iget(3).internal_values(), np.array(["foo"] * 3, dtype=np.object_)
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
def test_set_change_dtype(self, mgr):
|
| 466 |
+
mgr.insert(len(mgr.items), "baz", np.zeros(N, dtype=bool))
|
| 467 |
+
|
| 468 |
+
mgr.iset(mgr.items.get_loc("baz"), np.repeat("foo", N))
|
| 469 |
+
idx = mgr.items.get_loc("baz")
|
| 470 |
+
assert mgr.iget(idx).dtype == np.object_
|
| 471 |
+
|
| 472 |
+
mgr2 = mgr.consolidate()
|
| 473 |
+
mgr2.iset(mgr2.items.get_loc("baz"), np.repeat("foo", N))
|
| 474 |
+
idx = mgr2.items.get_loc("baz")
|
| 475 |
+
assert mgr2.iget(idx).dtype == np.object_
|
| 476 |
+
|
| 477 |
+
mgr2.insert(
|
| 478 |
+
len(mgr2.items),
|
| 479 |
+
"quux",
|
| 480 |
+
np.random.default_rng(2).standard_normal(N).astype(int),
|
| 481 |
+
)
|
| 482 |
+
idx = mgr2.items.get_loc("quux")
|
| 483 |
+
assert mgr2.iget(idx).dtype == np.dtype(int)
|
| 484 |
+
|
| 485 |
+
mgr2.iset(
|
| 486 |
+
mgr2.items.get_loc("quux"), np.random.default_rng(2).standard_normal(N)
|
| 487 |
+
)
|
| 488 |
+
assert mgr2.iget(idx).dtype == np.float64
|
| 489 |
+
|
| 490 |
+
def test_copy(self, mgr):
|
| 491 |
+
cp = mgr.copy(deep=False)
|
| 492 |
+
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
|
| 493 |
+
# view assertion
|
| 494 |
+
tm.assert_equal(cp_blk.values, blk.values)
|
| 495 |
+
if isinstance(blk.values, np.ndarray):
|
| 496 |
+
assert cp_blk.values.base is blk.values.base
|
| 497 |
+
else:
|
| 498 |
+
# DatetimeTZBlock has DatetimeIndex values
|
| 499 |
+
assert cp_blk.values._ndarray.base is blk.values._ndarray.base
|
| 500 |
+
|
| 501 |
+
# copy(deep=True) consolidates, so the block-wise assertions will
|
| 502 |
+
# fail is mgr is not consolidated
|
| 503 |
+
mgr._consolidate_inplace()
|
| 504 |
+
cp = mgr.copy(deep=True)
|
| 505 |
+
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
|
| 506 |
+
bvals = blk.values
|
| 507 |
+
cpvals = cp_blk.values
|
| 508 |
+
|
| 509 |
+
tm.assert_equal(cpvals, bvals)
|
| 510 |
+
|
| 511 |
+
if isinstance(cpvals, np.ndarray):
|
| 512 |
+
lbase = cpvals.base
|
| 513 |
+
rbase = bvals.base
|
| 514 |
+
else:
|
| 515 |
+
lbase = cpvals._ndarray.base
|
| 516 |
+
rbase = bvals._ndarray.base
|
| 517 |
+
|
| 518 |
+
# copy assertion we either have a None for a base or in case of
|
| 519 |
+
# some blocks it is an array (e.g. datetimetz), but was copied
|
| 520 |
+
if isinstance(cpvals, DatetimeArray):
|
| 521 |
+
assert (lbase is None and rbase is None) or (lbase is not rbase)
|
| 522 |
+
elif not isinstance(cpvals, np.ndarray):
|
| 523 |
+
assert lbase is not rbase
|
| 524 |
+
else:
|
| 525 |
+
assert lbase is None and rbase is None
|
| 526 |
+
|
| 527 |
+
def test_sparse(self):
|
| 528 |
+
mgr = create_mgr("a: sparse-1; b: sparse-2")
|
| 529 |
+
assert mgr.as_array().dtype == np.float64
|
| 530 |
+
|
| 531 |
+
def test_sparse_mixed(self):
|
| 532 |
+
mgr = create_mgr("a: sparse-1; b: sparse-2; c: f8")
|
| 533 |
+
assert len(mgr.blocks) == 3
|
| 534 |
+
assert isinstance(mgr, BlockManager)
|
| 535 |
+
|
| 536 |
+
@pytest.mark.parametrize(
|
| 537 |
+
"mgr_string, dtype",
|
| 538 |
+
[("c: f4; d: f2", np.float32), ("c: f4; d: f2; e: f8", np.float64)],
|
| 539 |
+
)
|
| 540 |
+
def test_as_array_float(self, mgr_string, dtype):
|
| 541 |
+
mgr = create_mgr(mgr_string)
|
| 542 |
+
assert mgr.as_array().dtype == dtype
|
| 543 |
+
|
| 544 |
+
@pytest.mark.parametrize(
|
| 545 |
+
"mgr_string, dtype",
|
| 546 |
+
[
|
| 547 |
+
("a: bool-1; b: bool-2", np.bool_),
|
| 548 |
+
("a: i8-1; b: i8-2; c: i4; d: i2; e: u1", np.int64),
|
| 549 |
+
("c: i4; d: i2; e: u1", np.int32),
|
| 550 |
+
],
|
| 551 |
+
)
|
| 552 |
+
def test_as_array_int_bool(self, mgr_string, dtype):
|
| 553 |
+
mgr = create_mgr(mgr_string)
|
| 554 |
+
assert mgr.as_array().dtype == dtype
|
| 555 |
+
|
| 556 |
+
def test_as_array_datetime(self):
|
| 557 |
+
mgr = create_mgr("h: datetime-1; g: datetime-2")
|
| 558 |
+
assert mgr.as_array().dtype == "M8[ns]"
|
| 559 |
+
|
| 560 |
+
def test_as_array_datetime_tz(self):
|
| 561 |
+
mgr = create_mgr("h: M8[ns, US/Eastern]; g: M8[ns, CET]")
|
| 562 |
+
assert mgr.iget(0).dtype == "datetime64[ns, US/Eastern]"
|
| 563 |
+
assert mgr.iget(1).dtype == "datetime64[ns, CET]"
|
| 564 |
+
assert mgr.as_array().dtype == "object"
|
| 565 |
+
|
| 566 |
+
@pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"])
|
| 567 |
+
def test_astype(self, t):
|
| 568 |
+
# coerce all
|
| 569 |
+
mgr = create_mgr("c: f4; d: f2; e: f8")
|
| 570 |
+
|
| 571 |
+
t = np.dtype(t)
|
| 572 |
+
tmgr = mgr.astype(t)
|
| 573 |
+
assert tmgr.iget(0).dtype.type == t
|
| 574 |
+
assert tmgr.iget(1).dtype.type == t
|
| 575 |
+
assert tmgr.iget(2).dtype.type == t
|
| 576 |
+
|
| 577 |
+
# mixed
|
| 578 |
+
mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8")
|
| 579 |
+
|
| 580 |
+
t = np.dtype(t)
|
| 581 |
+
tmgr = mgr.astype(t, errors="ignore")
|
| 582 |
+
assert tmgr.iget(2).dtype.type == t
|
| 583 |
+
assert tmgr.iget(4).dtype.type == t
|
| 584 |
+
assert tmgr.iget(5).dtype.type == t
|
| 585 |
+
assert tmgr.iget(6).dtype.type == t
|
| 586 |
+
|
| 587 |
+
assert tmgr.iget(0).dtype.type == np.object_
|
| 588 |
+
assert tmgr.iget(1).dtype.type == np.object_
|
| 589 |
+
if t != np.int64:
|
| 590 |
+
assert tmgr.iget(3).dtype.type == np.datetime64
|
| 591 |
+
else:
|
| 592 |
+
assert tmgr.iget(3).dtype.type == t
|
| 593 |
+
|
| 594 |
+
def test_convert(self, using_infer_string):
|
| 595 |
+
def _compare(old_mgr, new_mgr):
|
| 596 |
+
"""compare the blocks, numeric compare ==, object don't"""
|
| 597 |
+
old_blocks = set(old_mgr.blocks)
|
| 598 |
+
new_blocks = set(new_mgr.blocks)
|
| 599 |
+
assert len(old_blocks) == len(new_blocks)
|
| 600 |
+
|
| 601 |
+
# compare non-numeric
|
| 602 |
+
for b in old_blocks:
|
| 603 |
+
found = False
|
| 604 |
+
for nb in new_blocks:
|
| 605 |
+
if (b.values == nb.values).all():
|
| 606 |
+
found = True
|
| 607 |
+
break
|
| 608 |
+
assert found
|
| 609 |
+
|
| 610 |
+
for b in new_blocks:
|
| 611 |
+
found = False
|
| 612 |
+
for ob in old_blocks:
|
| 613 |
+
if (b.values == ob.values).all():
|
| 614 |
+
found = True
|
| 615 |
+
break
|
| 616 |
+
assert found
|
| 617 |
+
|
| 618 |
+
# noops
|
| 619 |
+
mgr = create_mgr("f: i8; g: f8")
|
| 620 |
+
new_mgr = mgr.convert(copy=True)
|
| 621 |
+
_compare(mgr, new_mgr)
|
| 622 |
+
|
| 623 |
+
# convert
|
| 624 |
+
mgr = create_mgr("a,b,foo: object; f: i8; g: f8")
|
| 625 |
+
mgr.iset(0, np.array(["1"] * N, dtype=np.object_))
|
| 626 |
+
mgr.iset(1, np.array(["2."] * N, dtype=np.object_))
|
| 627 |
+
mgr.iset(2, np.array(["foo."] * N, dtype=np.object_))
|
| 628 |
+
new_mgr = mgr.convert(copy=True)
|
| 629 |
+
dtype = "string[pyarrow_numpy]" if using_infer_string else np.object_
|
| 630 |
+
assert new_mgr.iget(0).dtype == dtype
|
| 631 |
+
assert new_mgr.iget(1).dtype == dtype
|
| 632 |
+
assert new_mgr.iget(2).dtype == dtype
|
| 633 |
+
assert new_mgr.iget(3).dtype == np.int64
|
| 634 |
+
assert new_mgr.iget(4).dtype == np.float64
|
| 635 |
+
|
| 636 |
+
mgr = create_mgr(
|
| 637 |
+
"a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2"
|
| 638 |
+
)
|
| 639 |
+
mgr.iset(0, np.array(["1"] * N, dtype=np.object_))
|
| 640 |
+
mgr.iset(1, np.array(["2."] * N, dtype=np.object_))
|
| 641 |
+
mgr.iset(2, np.array(["foo."] * N, dtype=np.object_))
|
| 642 |
+
new_mgr = mgr.convert(copy=True)
|
| 643 |
+
assert new_mgr.iget(0).dtype == dtype
|
| 644 |
+
assert new_mgr.iget(1).dtype == dtype
|
| 645 |
+
assert new_mgr.iget(2).dtype == dtype
|
| 646 |
+
assert new_mgr.iget(3).dtype == np.int32
|
| 647 |
+
assert new_mgr.iget(4).dtype == np.bool_
|
| 648 |
+
assert new_mgr.iget(5).dtype.type, np.datetime64
|
| 649 |
+
assert new_mgr.iget(6).dtype == np.int64
|
| 650 |
+
assert new_mgr.iget(7).dtype == np.float64
|
| 651 |
+
assert new_mgr.iget(8).dtype == np.float16
|
| 652 |
+
|
| 653 |
+
def test_interleave(self):
|
| 654 |
+
# self
|
| 655 |
+
for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]:
|
| 656 |
+
mgr = create_mgr(f"a: {dtype}")
|
| 657 |
+
assert mgr.as_array().dtype == dtype
|
| 658 |
+
mgr = create_mgr(f"a: {dtype}; b: {dtype}")
|
| 659 |
+
assert mgr.as_array().dtype == dtype
|
| 660 |
+
|
| 661 |
+
@pytest.mark.parametrize(
|
| 662 |
+
"mgr_string, dtype",
|
| 663 |
+
[
|
| 664 |
+
("a: category", "i8"),
|
| 665 |
+
("a: category; b: category", "i8"),
|
| 666 |
+
("a: category; b: category2", "object"),
|
| 667 |
+
("a: category2", "object"),
|
| 668 |
+
("a: category2; b: category2", "object"),
|
| 669 |
+
("a: f8", "f8"),
|
| 670 |
+
("a: f8; b: i8", "f8"),
|
| 671 |
+
("a: f4; b: i8", "f8"),
|
| 672 |
+
("a: f4; b: i8; d: object", "object"),
|
| 673 |
+
("a: bool; b: i8", "object"),
|
| 674 |
+
("a: complex", "complex"),
|
| 675 |
+
("a: f8; b: category", "object"),
|
| 676 |
+
("a: M8[ns]; b: category", "object"),
|
| 677 |
+
("a: M8[ns]; b: bool", "object"),
|
| 678 |
+
("a: M8[ns]; b: i8", "object"),
|
| 679 |
+
("a: m8[ns]; b: bool", "object"),
|
| 680 |
+
("a: m8[ns]; b: i8", "object"),
|
| 681 |
+
("a: M8[ns]; b: m8[ns]", "object"),
|
| 682 |
+
],
|
| 683 |
+
)
|
| 684 |
+
def test_interleave_dtype(self, mgr_string, dtype):
|
| 685 |
+
# will be converted according the actual dtype of the underlying
|
| 686 |
+
mgr = create_mgr("a: category")
|
| 687 |
+
assert mgr.as_array().dtype == "i8"
|
| 688 |
+
mgr = create_mgr("a: category; b: category2")
|
| 689 |
+
assert mgr.as_array().dtype == "object"
|
| 690 |
+
mgr = create_mgr("a: category2")
|
| 691 |
+
assert mgr.as_array().dtype == "object"
|
| 692 |
+
|
| 693 |
+
# combinations
|
| 694 |
+
mgr = create_mgr("a: f8")
|
| 695 |
+
assert mgr.as_array().dtype == "f8"
|
| 696 |
+
mgr = create_mgr("a: f8; b: i8")
|
| 697 |
+
assert mgr.as_array().dtype == "f8"
|
| 698 |
+
mgr = create_mgr("a: f4; b: i8")
|
| 699 |
+
assert mgr.as_array().dtype == "f8"
|
| 700 |
+
mgr = create_mgr("a: f4; b: i8; d: object")
|
| 701 |
+
assert mgr.as_array().dtype == "object"
|
| 702 |
+
mgr = create_mgr("a: bool; b: i8")
|
| 703 |
+
assert mgr.as_array().dtype == "object"
|
| 704 |
+
mgr = create_mgr("a: complex")
|
| 705 |
+
assert mgr.as_array().dtype == "complex"
|
| 706 |
+
mgr = create_mgr("a: f8; b: category")
|
| 707 |
+
assert mgr.as_array().dtype == "f8"
|
| 708 |
+
mgr = create_mgr("a: M8[ns]; b: category")
|
| 709 |
+
assert mgr.as_array().dtype == "object"
|
| 710 |
+
mgr = create_mgr("a: M8[ns]; b: bool")
|
| 711 |
+
assert mgr.as_array().dtype == "object"
|
| 712 |
+
mgr = create_mgr("a: M8[ns]; b: i8")
|
| 713 |
+
assert mgr.as_array().dtype == "object"
|
| 714 |
+
mgr = create_mgr("a: m8[ns]; b: bool")
|
| 715 |
+
assert mgr.as_array().dtype == "object"
|
| 716 |
+
mgr = create_mgr("a: m8[ns]; b: i8")
|
| 717 |
+
assert mgr.as_array().dtype == "object"
|
| 718 |
+
mgr = create_mgr("a: M8[ns]; b: m8[ns]")
|
| 719 |
+
assert mgr.as_array().dtype == "object"
|
| 720 |
+
|
| 721 |
+
def test_consolidate_ordering_issues(self, mgr):
|
| 722 |
+
mgr.iset(mgr.items.get_loc("f"), np.random.default_rng(2).standard_normal(N))
|
| 723 |
+
mgr.iset(mgr.items.get_loc("d"), np.random.default_rng(2).standard_normal(N))
|
| 724 |
+
mgr.iset(mgr.items.get_loc("b"), np.random.default_rng(2).standard_normal(N))
|
| 725 |
+
mgr.iset(mgr.items.get_loc("g"), np.random.default_rng(2).standard_normal(N))
|
| 726 |
+
mgr.iset(mgr.items.get_loc("h"), np.random.default_rng(2).standard_normal(N))
|
| 727 |
+
|
| 728 |
+
# we have datetime/tz blocks in mgr
|
| 729 |
+
cons = mgr.consolidate()
|
| 730 |
+
assert cons.nblocks == 4
|
| 731 |
+
cons = mgr.consolidate().get_numeric_data()
|
| 732 |
+
assert cons.nblocks == 1
|
| 733 |
+
assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
|
| 734 |
+
tm.assert_numpy_array_equal(
|
| 735 |
+
cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.intp)
|
| 736 |
+
)
|
| 737 |
+
|
| 738 |
+
def test_reindex_items(self):
|
| 739 |
+
# mgr is not consolidated, f8 & f8-2 blocks
|
| 740 |
+
mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2")
|
| 741 |
+
|
| 742 |
+
reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
|
| 743 |
+
# reindex_axis does not consolidate_inplace, as that risks failing to
|
| 744 |
+
# invalidate _item_cache
|
| 745 |
+
assert not reindexed.is_consolidated()
|
| 746 |
+
|
| 747 |
+
tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"]))
|
| 748 |
+
tm.assert_almost_equal(
|
| 749 |
+
mgr.iget(6).internal_values(), reindexed.iget(0).internal_values()
|
| 750 |
+
)
|
| 751 |
+
tm.assert_almost_equal(
|
| 752 |
+
mgr.iget(2).internal_values(), reindexed.iget(1).internal_values()
|
| 753 |
+
)
|
| 754 |
+
tm.assert_almost_equal(
|
| 755 |
+
mgr.iget(0).internal_values(), reindexed.iget(2).internal_values()
|
| 756 |
+
)
|
| 757 |
+
tm.assert_almost_equal(
|
| 758 |
+
mgr.iget(3).internal_values(), reindexed.iget(3).internal_values()
|
| 759 |
+
)
|
| 760 |
+
|
| 761 |
+
def test_get_numeric_data(self, using_copy_on_write):
|
| 762 |
+
mgr = create_mgr(
|
| 763 |
+
"int: int; float: float; complex: complex;"
|
| 764 |
+
"str: object; bool: bool; obj: object; dt: datetime",
|
| 765 |
+
item_shape=(3,),
|
| 766 |
+
)
|
| 767 |
+
mgr.iset(5, np.array([1, 2, 3], dtype=np.object_))
|
| 768 |
+
|
| 769 |
+
numeric = mgr.get_numeric_data()
|
| 770 |
+
tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
|
| 771 |
+
tm.assert_almost_equal(
|
| 772 |
+
mgr.iget(mgr.items.get_loc("float")).internal_values(),
|
| 773 |
+
numeric.iget(numeric.items.get_loc("float")).internal_values(),
|
| 774 |
+
)
|
| 775 |
+
|
| 776 |
+
# Check sharing
|
| 777 |
+
numeric.iset(
|
| 778 |
+
numeric.items.get_loc("float"),
|
| 779 |
+
np.array([100.0, 200.0, 300.0]),
|
| 780 |
+
inplace=True,
|
| 781 |
+
)
|
| 782 |
+
if using_copy_on_write:
|
| 783 |
+
tm.assert_almost_equal(
|
| 784 |
+
mgr.iget(mgr.items.get_loc("float")).internal_values(),
|
| 785 |
+
np.array([1.0, 1.0, 1.0]),
|
| 786 |
+
)
|
| 787 |
+
else:
|
| 788 |
+
tm.assert_almost_equal(
|
| 789 |
+
mgr.iget(mgr.items.get_loc("float")).internal_values(),
|
| 790 |
+
np.array([100.0, 200.0, 300.0]),
|
| 791 |
+
)
|
| 792 |
+
|
| 793 |
+
def test_get_bool_data(self, using_copy_on_write):
|
| 794 |
+
mgr = create_mgr(
|
| 795 |
+
"int: int; float: float; complex: complex;"
|
| 796 |
+
"str: object; bool: bool; obj: object; dt: datetime",
|
| 797 |
+
item_shape=(3,),
|
| 798 |
+
)
|
| 799 |
+
mgr.iset(6, np.array([True, False, True], dtype=np.object_))
|
| 800 |
+
|
| 801 |
+
bools = mgr.get_bool_data()
|
| 802 |
+
tm.assert_index_equal(bools.items, Index(["bool"]))
|
| 803 |
+
tm.assert_almost_equal(
|
| 804 |
+
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
|
| 805 |
+
bools.iget(bools.items.get_loc("bool")).internal_values(),
|
| 806 |
+
)
|
| 807 |
+
|
| 808 |
+
bools.iset(0, np.array([True, False, True]), inplace=True)
|
| 809 |
+
if using_copy_on_write:
|
| 810 |
+
tm.assert_numpy_array_equal(
|
| 811 |
+
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
|
| 812 |
+
np.array([True, True, True]),
|
| 813 |
+
)
|
| 814 |
+
else:
|
| 815 |
+
tm.assert_numpy_array_equal(
|
| 816 |
+
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
|
| 817 |
+
np.array([True, False, True]),
|
| 818 |
+
)
|
| 819 |
+
|
| 820 |
+
def test_unicode_repr_doesnt_raise(self):
|
| 821 |
+
repr(create_mgr("b,\u05d0: object"))
|
| 822 |
+
|
| 823 |
+
@pytest.mark.parametrize(
|
| 824 |
+
"mgr_string", ["a,b,c: i8-1; d,e,f: i8-2", "a,a,a: i8-1; b,b,b: i8-2"]
|
| 825 |
+
)
|
| 826 |
+
def test_equals(self, mgr_string):
|
| 827 |
+
# unique items
|
| 828 |
+
bm1 = create_mgr(mgr_string)
|
| 829 |
+
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
|
| 830 |
+
assert bm1.equals(bm2)
|
| 831 |
+
|
| 832 |
+
@pytest.mark.parametrize(
|
| 833 |
+
"mgr_string",
|
| 834 |
+
[
|
| 835 |
+
"a:i8;b:f8", # basic case
|
| 836 |
+
"a:i8;b:f8;c:c8;d:b", # many types
|
| 837 |
+
"a:i8;e:dt;f:td;g:string", # more types
|
| 838 |
+
"a:i8;b:category;c:category2", # categories
|
| 839 |
+
"c:sparse;d:sparse_na;b:f8", # sparse
|
| 840 |
+
],
|
| 841 |
+
)
|
| 842 |
+
def test_equals_block_order_different_dtypes(self, mgr_string):
|
| 843 |
+
# GH 9330
|
| 844 |
+
bm = create_mgr(mgr_string)
|
| 845 |
+
block_perms = itertools.permutations(bm.blocks)
|
| 846 |
+
for bm_perm in block_perms:
|
| 847 |
+
bm_this = BlockManager(tuple(bm_perm), bm.axes)
|
| 848 |
+
assert bm.equals(bm_this)
|
| 849 |
+
assert bm_this.equals(bm)
|
| 850 |
+
|
| 851 |
+
def test_single_mgr_ctor(self):
|
| 852 |
+
mgr = create_single_mgr("f8", num_rows=5)
|
| 853 |
+
assert mgr.external_values().tolist() == [0.0, 1.0, 2.0, 3.0, 4.0]
|
| 854 |
+
|
| 855 |
+
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
|
| 856 |
+
def test_validate_bool_args(self, value):
|
| 857 |
+
bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2")
|
| 858 |
+
|
| 859 |
+
msg = (
|
| 860 |
+
'For argument "inplace" expected type bool, '
|
| 861 |
+
f"received type {type(value).__name__}."
|
| 862 |
+
)
|
| 863 |
+
with pytest.raises(ValueError, match=msg):
|
| 864 |
+
bm1.replace_list([1], [2], inplace=value)
|
| 865 |
+
|
| 866 |
+
def test_iset_split_block(self):
|
| 867 |
+
bm = create_mgr("a,b,c: i8; d: f8")
|
| 868 |
+
bm._iset_split_block(0, np.array([0]))
|
| 869 |
+
tm.assert_numpy_array_equal(
|
| 870 |
+
bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
|
| 871 |
+
)
|
| 872 |
+
# First indexer currently does not have a block associated with it in case
|
| 873 |
+
tm.assert_numpy_array_equal(
|
| 874 |
+
bm.blknos, np.array([0, 0, 0, 1], dtype="int64" if IS64 else "int32")
|
| 875 |
+
)
|
| 876 |
+
assert len(bm.blocks) == 2
|
| 877 |
+
|
| 878 |
+
def test_iset_split_block_values(self):
|
| 879 |
+
bm = create_mgr("a,b,c: i8; d: f8")
|
| 880 |
+
bm._iset_split_block(0, np.array([0]), np.array([list(range(10))]))
|
| 881 |
+
tm.assert_numpy_array_equal(
|
| 882 |
+
bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
|
| 883 |
+
)
|
| 884 |
+
# First indexer currently does not have a block associated with it in case
|
| 885 |
+
tm.assert_numpy_array_equal(
|
| 886 |
+
bm.blknos, np.array([0, 2, 2, 1], dtype="int64" if IS64 else "int32")
|
| 887 |
+
)
|
| 888 |
+
assert len(bm.blocks) == 3
|
| 889 |
+
|
| 890 |
+
|
| 891 |
+
def _as_array(mgr):
|
| 892 |
+
if mgr.ndim == 1:
|
| 893 |
+
return mgr.external_values()
|
| 894 |
+
return mgr.as_array().T
|
| 895 |
+
|
| 896 |
+
|
| 897 |
+
class TestIndexing:
|
| 898 |
+
# Nosetests-style data-driven tests.
|
| 899 |
+
#
|
| 900 |
+
# This test applies different indexing routines to block managers and
|
| 901 |
+
# compares the outcome to the result of same operations on np.ndarray.
|
| 902 |
+
#
|
| 903 |
+
# NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
|
| 904 |
+
# and are disabled.
|
| 905 |
+
|
| 906 |
+
MANAGERS = [
|
| 907 |
+
create_single_mgr("f8", N),
|
| 908 |
+
create_single_mgr("i8", N),
|
| 909 |
+
# 2-dim
|
| 910 |
+
create_mgr("a,b,c,d,e,f: f8", item_shape=(N,)),
|
| 911 |
+
create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)),
|
| 912 |
+
create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)),
|
| 913 |
+
create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)),
|
| 914 |
+
]
|
| 915 |
+
|
| 916 |
+
@pytest.mark.parametrize("mgr", MANAGERS)
|
| 917 |
+
def test_get_slice(self, mgr):
|
| 918 |
+
def assert_slice_ok(mgr, axis, slobj):
|
| 919 |
+
mat = _as_array(mgr)
|
| 920 |
+
|
| 921 |
+
# we maybe using an ndarray to test slicing and
|
| 922 |
+
# might not be the full length of the axis
|
| 923 |
+
if isinstance(slobj, np.ndarray):
|
| 924 |
+
ax = mgr.axes[axis]
|
| 925 |
+
if len(ax) and len(slobj) and len(slobj) != len(ax):
|
| 926 |
+
slobj = np.concatenate(
|
| 927 |
+
[slobj, np.zeros(len(ax) - len(slobj), dtype=bool)]
|
| 928 |
+
)
|
| 929 |
+
|
| 930 |
+
if isinstance(slobj, slice):
|
| 931 |
+
sliced = mgr.get_slice(slobj, axis=axis)
|
| 932 |
+
elif (
|
| 933 |
+
mgr.ndim == 1
|
| 934 |
+
and axis == 0
|
| 935 |
+
and isinstance(slobj, np.ndarray)
|
| 936 |
+
and slobj.dtype == bool
|
| 937 |
+
):
|
| 938 |
+
sliced = mgr.get_rows_with_mask(slobj)
|
| 939 |
+
else:
|
| 940 |
+
# BlockManager doesn't support non-slice, SingleBlockManager
|
| 941 |
+
# doesn't support axis > 0
|
| 942 |
+
raise TypeError(slobj)
|
| 943 |
+
|
| 944 |
+
mat_slobj = (slice(None),) * axis + (slobj,)
|
| 945 |
+
tm.assert_numpy_array_equal(
|
| 946 |
+
mat[mat_slobj], _as_array(sliced), check_dtype=False
|
| 947 |
+
)
|
| 948 |
+
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
|
| 949 |
+
|
| 950 |
+
assert mgr.ndim <= 2, mgr.ndim
|
| 951 |
+
for ax in range(mgr.ndim):
|
| 952 |
+
# slice
|
| 953 |
+
assert_slice_ok(mgr, ax, slice(None))
|
| 954 |
+
assert_slice_ok(mgr, ax, slice(3))
|
| 955 |
+
assert_slice_ok(mgr, ax, slice(100))
|
| 956 |
+
assert_slice_ok(mgr, ax, slice(1, 4))
|
| 957 |
+
assert_slice_ok(mgr, ax, slice(3, 0, -2))
|
| 958 |
+
|
| 959 |
+
if mgr.ndim < 2:
|
| 960 |
+
# 2D only support slice objects
|
| 961 |
+
|
| 962 |
+
# boolean mask
|
| 963 |
+
assert_slice_ok(mgr, ax, np.ones(mgr.shape[ax], dtype=np.bool_))
|
| 964 |
+
assert_slice_ok(mgr, ax, np.zeros(mgr.shape[ax], dtype=np.bool_))
|
| 965 |
+
|
| 966 |
+
if mgr.shape[ax] >= 3:
|
| 967 |
+
assert_slice_ok(mgr, ax, np.arange(mgr.shape[ax]) % 3 == 0)
|
| 968 |
+
assert_slice_ok(
|
| 969 |
+
mgr, ax, np.array([True, True, False], dtype=np.bool_)
|
| 970 |
+
)
|
| 971 |
+
|
| 972 |
+
@pytest.mark.parametrize("mgr", MANAGERS)
|
| 973 |
+
def test_take(self, mgr):
|
| 974 |
+
def assert_take_ok(mgr, axis, indexer):
|
| 975 |
+
mat = _as_array(mgr)
|
| 976 |
+
taken = mgr.take(indexer, axis)
|
| 977 |
+
tm.assert_numpy_array_equal(
|
| 978 |
+
np.take(mat, indexer, axis), _as_array(taken), check_dtype=False
|
| 979 |
+
)
|
| 980 |
+
tm.assert_index_equal(mgr.axes[axis].take(indexer), taken.axes[axis])
|
| 981 |
+
|
| 982 |
+
for ax in range(mgr.ndim):
|
| 983 |
+
# take/fancy indexer
|
| 984 |
+
assert_take_ok(mgr, ax, indexer=np.array([], dtype=np.intp))
|
| 985 |
+
assert_take_ok(mgr, ax, indexer=np.array([0, 0, 0], dtype=np.intp))
|
| 986 |
+
assert_take_ok(
|
| 987 |
+
mgr, ax, indexer=np.array(list(range(mgr.shape[ax])), dtype=np.intp)
|
| 988 |
+
)
|
| 989 |
+
|
| 990 |
+
if mgr.shape[ax] >= 3:
|
| 991 |
+
assert_take_ok(mgr, ax, indexer=np.array([0, 1, 2], dtype=np.intp))
|
| 992 |
+
assert_take_ok(mgr, ax, indexer=np.array([-1, -2, -3], dtype=np.intp))
|
| 993 |
+
|
| 994 |
+
@pytest.mark.parametrize("mgr", MANAGERS)
|
| 995 |
+
@pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
|
| 996 |
+
def test_reindex_axis(self, fill_value, mgr):
|
| 997 |
+
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
|
| 998 |
+
mat = _as_array(mgr)
|
| 999 |
+
indexer = mgr.axes[axis].get_indexer_for(new_labels)
|
| 1000 |
+
|
| 1001 |
+
reindexed = mgr.reindex_axis(new_labels, axis, fill_value=fill_value)
|
| 1002 |
+
tm.assert_numpy_array_equal(
|
| 1003 |
+
algos.take_nd(mat, indexer, axis, fill_value=fill_value),
|
| 1004 |
+
_as_array(reindexed),
|
| 1005 |
+
check_dtype=False,
|
| 1006 |
+
)
|
| 1007 |
+
tm.assert_index_equal(reindexed.axes[axis], new_labels)
|
| 1008 |
+
|
| 1009 |
+
for ax in range(mgr.ndim):
|
| 1010 |
+
assert_reindex_axis_is_ok(mgr, ax, Index([]), fill_value)
|
| 1011 |
+
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax], fill_value)
|
| 1012 |
+
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][[0, 0, 0]], fill_value)
|
| 1013 |
+
assert_reindex_axis_is_ok(mgr, ax, Index(["foo", "bar", "baz"]), fill_value)
|
| 1014 |
+
assert_reindex_axis_is_ok(
|
| 1015 |
+
mgr, ax, Index(["foo", mgr.axes[ax][0], "baz"]), fill_value
|
| 1016 |
+
)
|
| 1017 |
+
|
| 1018 |
+
if mgr.shape[ax] >= 3:
|
| 1019 |
+
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][:-3], fill_value)
|
| 1020 |
+
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][-3::-1], fill_value)
|
| 1021 |
+
assert_reindex_axis_is_ok(
|
| 1022 |
+
mgr, ax, mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value
|
| 1023 |
+
)
|
| 1024 |
+
|
| 1025 |
+
@pytest.mark.parametrize("mgr", MANAGERS)
|
| 1026 |
+
@pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
|
| 1027 |
+
def test_reindex_indexer(self, fill_value, mgr):
|
| 1028 |
+
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value):
|
| 1029 |
+
mat = _as_array(mgr)
|
| 1030 |
+
reindexed_mat = algos.take_nd(mat, indexer, axis, fill_value=fill_value)
|
| 1031 |
+
reindexed = mgr.reindex_indexer(
|
| 1032 |
+
new_labels, indexer, axis, fill_value=fill_value
|
| 1033 |
+
)
|
| 1034 |
+
tm.assert_numpy_array_equal(
|
| 1035 |
+
reindexed_mat, _as_array(reindexed), check_dtype=False
|
| 1036 |
+
)
|
| 1037 |
+
tm.assert_index_equal(reindexed.axes[axis], new_labels)
|
| 1038 |
+
|
| 1039 |
+
for ax in range(mgr.ndim):
|
| 1040 |
+
assert_reindex_indexer_is_ok(
|
| 1041 |
+
mgr, ax, Index([]), np.array([], dtype=np.intp), fill_value
|
| 1042 |
+
)
|
| 1043 |
+
assert_reindex_indexer_is_ok(
|
| 1044 |
+
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value
|
| 1045 |
+
)
|
| 1046 |
+
assert_reindex_indexer_is_ok(
|
| 1047 |
+
mgr,
|
| 1048 |
+
ax,
|
| 1049 |
+
Index(["foo"] * mgr.shape[ax]),
|
| 1050 |
+
np.arange(mgr.shape[ax]),
|
| 1051 |
+
fill_value,
|
| 1052 |
+
)
|
| 1053 |
+
assert_reindex_indexer_is_ok(
|
| 1054 |
+
mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value
|
| 1055 |
+
)
|
| 1056 |
+
assert_reindex_indexer_is_ok(
|
| 1057 |
+
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value
|
| 1058 |
+
)
|
| 1059 |
+
assert_reindex_indexer_is_ok(
|
| 1060 |
+
mgr, ax, Index(["foo", "bar", "baz"]), np.array([0, 0, 0]), fill_value
|
| 1061 |
+
)
|
| 1062 |
+
assert_reindex_indexer_is_ok(
|
| 1063 |
+
mgr, ax, Index(["foo", "bar", "baz"]), np.array([-1, 0, -1]), fill_value
|
| 1064 |
+
)
|
| 1065 |
+
assert_reindex_indexer_is_ok(
|
| 1066 |
+
mgr,
|
| 1067 |
+
ax,
|
| 1068 |
+
Index(["foo", mgr.axes[ax][0], "baz"]),
|
| 1069 |
+
np.array([-1, -1, -1]),
|
| 1070 |
+
fill_value,
|
| 1071 |
+
)
|
| 1072 |
+
|
| 1073 |
+
if mgr.shape[ax] >= 3:
|
| 1074 |
+
assert_reindex_indexer_is_ok(
|
| 1075 |
+
mgr,
|
| 1076 |
+
ax,
|
| 1077 |
+
Index(["foo", "bar", "baz"]),
|
| 1078 |
+
np.array([0, 1, 2]),
|
| 1079 |
+
fill_value,
|
| 1080 |
+
)
|
| 1081 |
+
|
| 1082 |
+
|
| 1083 |
+
class TestBlockPlacement:
|
| 1084 |
+
@pytest.mark.parametrize(
|
| 1085 |
+
"slc, expected",
|
| 1086 |
+
[
|
| 1087 |
+
(slice(0, 4), 4),
|
| 1088 |
+
(slice(0, 4, 2), 2),
|
| 1089 |
+
(slice(0, 3, 2), 2),
|
| 1090 |
+
(slice(0, 1, 2), 1),
|
| 1091 |
+
(slice(1, 0, -1), 1),
|
| 1092 |
+
],
|
| 1093 |
+
)
|
| 1094 |
+
def test_slice_len(self, slc, expected):
|
| 1095 |
+
assert len(BlockPlacement(slc)) == expected
|
| 1096 |
+
|
| 1097 |
+
@pytest.mark.parametrize("slc", [slice(1, 1, 0), slice(1, 2, 0)])
|
| 1098 |
+
def test_zero_step_raises(self, slc):
|
| 1099 |
+
msg = "slice step cannot be zero"
|
| 1100 |
+
with pytest.raises(ValueError, match=msg):
|
| 1101 |
+
BlockPlacement(slc)
|
| 1102 |
+
|
| 1103 |
+
def test_slice_canonize_negative_stop(self):
|
| 1104 |
+
# GH#37524 negative stop is OK with negative step and positive start
|
| 1105 |
+
slc = slice(3, -1, -2)
|
| 1106 |
+
|
| 1107 |
+
bp = BlockPlacement(slc)
|
| 1108 |
+
assert bp.indexer == slice(3, None, -2)
|
| 1109 |
+
|
| 1110 |
+
@pytest.mark.parametrize(
|
| 1111 |
+
"slc",
|
| 1112 |
+
[
|
| 1113 |
+
slice(None, None),
|
| 1114 |
+
slice(10, None),
|
| 1115 |
+
slice(None, None, -1),
|
| 1116 |
+
slice(None, 10, -1),
|
| 1117 |
+
# These are "unbounded" because negative index will
|
| 1118 |
+
# change depending on container shape.
|
| 1119 |
+
slice(-1, None),
|
| 1120 |
+
slice(None, -1),
|
| 1121 |
+
slice(-1, -1),
|
| 1122 |
+
slice(-1, None, -1),
|
| 1123 |
+
slice(None, -1, -1),
|
| 1124 |
+
slice(-1, -1, -1),
|
| 1125 |
+
],
|
| 1126 |
+
)
|
| 1127 |
+
def test_unbounded_slice_raises(self, slc):
|
| 1128 |
+
msg = "unbounded slice"
|
| 1129 |
+
with pytest.raises(ValueError, match=msg):
|
| 1130 |
+
BlockPlacement(slc)
|
| 1131 |
+
|
| 1132 |
+
@pytest.mark.parametrize(
|
| 1133 |
+
"slc",
|
| 1134 |
+
[
|
| 1135 |
+
slice(0, 0),
|
| 1136 |
+
slice(100, 0),
|
| 1137 |
+
slice(100, 100),
|
| 1138 |
+
slice(100, 100, -1),
|
| 1139 |
+
slice(0, 100, -1),
|
| 1140 |
+
],
|
| 1141 |
+
)
|
| 1142 |
+
def test_not_slice_like_slices(self, slc):
|
| 1143 |
+
assert not BlockPlacement(slc).is_slice_like
|
| 1144 |
+
|
| 1145 |
+
@pytest.mark.parametrize(
|
| 1146 |
+
"arr, slc",
|
| 1147 |
+
[
|
| 1148 |
+
([0], slice(0, 1, 1)),
|
| 1149 |
+
([100], slice(100, 101, 1)),
|
| 1150 |
+
([0, 1, 2], slice(0, 3, 1)),
|
| 1151 |
+
([0, 5, 10], slice(0, 15, 5)),
|
| 1152 |
+
([0, 100], slice(0, 200, 100)),
|
| 1153 |
+
([2, 1], slice(2, 0, -1)),
|
| 1154 |
+
],
|
| 1155 |
+
)
|
| 1156 |
+
def test_array_to_slice_conversion(self, arr, slc):
|
| 1157 |
+
assert BlockPlacement(arr).as_slice == slc
|
| 1158 |
+
|
| 1159 |
+
@pytest.mark.parametrize(
|
| 1160 |
+
"arr",
|
| 1161 |
+
[
|
| 1162 |
+
[],
|
| 1163 |
+
[-1],
|
| 1164 |
+
[-1, -2, -3],
|
| 1165 |
+
[-10],
|
| 1166 |
+
[-1],
|
| 1167 |
+
[-1, 0, 1, 2],
|
| 1168 |
+
[-2, 0, 2, 4],
|
| 1169 |
+
[1, 0, -1],
|
| 1170 |
+
[1, 1, 1],
|
| 1171 |
+
],
|
| 1172 |
+
)
|
| 1173 |
+
def test_not_slice_like_arrays(self, arr):
|
| 1174 |
+
assert not BlockPlacement(arr).is_slice_like
|
| 1175 |
+
|
| 1176 |
+
@pytest.mark.parametrize(
|
| 1177 |
+
"slc, expected",
|
| 1178 |
+
[(slice(0, 3), [0, 1, 2]), (slice(0, 0), []), (slice(3, 0), [])],
|
| 1179 |
+
)
|
| 1180 |
+
def test_slice_iter(self, slc, expected):
|
| 1181 |
+
assert list(BlockPlacement(slc)) == expected
|
| 1182 |
+
|
| 1183 |
+
@pytest.mark.parametrize(
|
| 1184 |
+
"slc, arr",
|
| 1185 |
+
[
|
| 1186 |
+
(slice(0, 3), [0, 1, 2]),
|
| 1187 |
+
(slice(0, 0), []),
|
| 1188 |
+
(slice(3, 0), []),
|
| 1189 |
+
(slice(3, 0, -1), [3, 2, 1]),
|
| 1190 |
+
],
|
| 1191 |
+
)
|
| 1192 |
+
def test_slice_to_array_conversion(self, slc, arr):
|
| 1193 |
+
tm.assert_numpy_array_equal(
|
| 1194 |
+
BlockPlacement(slc).as_array, np.asarray(arr, dtype=np.intp)
|
| 1195 |
+
)
|
| 1196 |
+
|
| 1197 |
+
def test_blockplacement_add(self):
|
| 1198 |
+
bpl = BlockPlacement(slice(0, 5))
|
| 1199 |
+
assert bpl.add(1).as_slice == slice(1, 6, 1)
|
| 1200 |
+
assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
|
| 1201 |
+
assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
|
| 1202 |
+
|
| 1203 |
+
@pytest.mark.parametrize(
|
| 1204 |
+
"val, inc, expected",
|
| 1205 |
+
[
|
| 1206 |
+
(slice(0, 0), 0, []),
|
| 1207 |
+
(slice(1, 4), 0, [1, 2, 3]),
|
| 1208 |
+
(slice(3, 0, -1), 0, [3, 2, 1]),
|
| 1209 |
+
([1, 2, 4], 0, [1, 2, 4]),
|
| 1210 |
+
(slice(0, 0), 10, []),
|
| 1211 |
+
(slice(1, 4), 10, [11, 12, 13]),
|
| 1212 |
+
(slice(3, 0, -1), 10, [13, 12, 11]),
|
| 1213 |
+
([1, 2, 4], 10, [11, 12, 14]),
|
| 1214 |
+
(slice(0, 0), -1, []),
|
| 1215 |
+
(slice(1, 4), -1, [0, 1, 2]),
|
| 1216 |
+
([1, 2, 4], -1, [0, 1, 3]),
|
| 1217 |
+
],
|
| 1218 |
+
)
|
| 1219 |
+
def test_blockplacement_add_int(self, val, inc, expected):
|
| 1220 |
+
assert list(BlockPlacement(val).add(inc)) == expected
|
| 1221 |
+
|
| 1222 |
+
@pytest.mark.parametrize("val", [slice(1, 4), [1, 2, 4]])
|
| 1223 |
+
def test_blockplacement_add_int_raises(self, val):
|
| 1224 |
+
msg = "iadd causes length change"
|
| 1225 |
+
with pytest.raises(ValueError, match=msg):
|
| 1226 |
+
BlockPlacement(val).add(-10)
|
| 1227 |
+
|
| 1228 |
+
|
| 1229 |
+
class TestCanHoldElement:
|
| 1230 |
+
@pytest.fixture(
|
| 1231 |
+
params=[
|
| 1232 |
+
lambda x: x,
|
| 1233 |
+
lambda x: x.to_series(),
|
| 1234 |
+
lambda x: x._data,
|
| 1235 |
+
lambda x: list(x),
|
| 1236 |
+
lambda x: x.astype(object),
|
| 1237 |
+
lambda x: np.asarray(x),
|
| 1238 |
+
lambda x: x[0],
|
| 1239 |
+
lambda x: x[:0],
|
| 1240 |
+
]
|
| 1241 |
+
)
|
| 1242 |
+
def element(self, request):
|
| 1243 |
+
"""
|
| 1244 |
+
Functions that take an Index and return an element that should have
|
| 1245 |
+
blk._can_hold_element(element) for a Block with this index's dtype.
|
| 1246 |
+
"""
|
| 1247 |
+
return request.param
|
| 1248 |
+
|
| 1249 |
+
def test_datetime_block_can_hold_element(self):
|
| 1250 |
+
block = create_block("datetime", [0])
|
| 1251 |
+
|
| 1252 |
+
assert block._can_hold_element([])
|
| 1253 |
+
|
| 1254 |
+
# We will check that block._can_hold_element iff arr.__setitem__ works
|
| 1255 |
+
arr = pd.array(block.values.ravel())
|
| 1256 |
+
|
| 1257 |
+
# coerce None
|
| 1258 |
+
assert block._can_hold_element(None)
|
| 1259 |
+
arr[0] = None
|
| 1260 |
+
assert arr[0] is pd.NaT
|
| 1261 |
+
|
| 1262 |
+
# coerce different types of datetime objects
|
| 1263 |
+
vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)]
|
| 1264 |
+
for val in vals:
|
| 1265 |
+
assert block._can_hold_element(val)
|
| 1266 |
+
arr[0] = val
|
| 1267 |
+
|
| 1268 |
+
val = date(2010, 10, 10)
|
| 1269 |
+
assert not block._can_hold_element(val)
|
| 1270 |
+
|
| 1271 |
+
msg = (
|
| 1272 |
+
"value should be a 'Timestamp', 'NaT', "
|
| 1273 |
+
"or array of those. Got 'date' instead."
|
| 1274 |
+
)
|
| 1275 |
+
with pytest.raises(TypeError, match=msg):
|
| 1276 |
+
arr[0] = val
|
| 1277 |
+
|
| 1278 |
+
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
|
| 1279 |
+
def test_interval_can_hold_element_emptylist(self, dtype, element):
|
| 1280 |
+
arr = np.array([1, 3, 4], dtype=dtype)
|
| 1281 |
+
ii = IntervalIndex.from_breaks(arr)
|
| 1282 |
+
blk = new_block(ii._data, BlockPlacement([1]), ndim=2)
|
| 1283 |
+
|
| 1284 |
+
assert blk._can_hold_element([])
|
| 1285 |
+
# TODO: check this holds for all blocks
|
| 1286 |
+
|
| 1287 |
+
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
|
| 1288 |
+
def test_interval_can_hold_element(self, dtype, element):
|
| 1289 |
+
arr = np.array([1, 3, 4, 9], dtype=dtype)
|
| 1290 |
+
ii = IntervalIndex.from_breaks(arr)
|
| 1291 |
+
blk = new_block(ii._data, BlockPlacement([1]), ndim=2)
|
| 1292 |
+
|
| 1293 |
+
elem = element(ii)
|
| 1294 |
+
self.check_series_setitem(elem, ii, True)
|
| 1295 |
+
assert blk._can_hold_element(elem)
|
| 1296 |
+
|
| 1297 |
+
# Careful: to get the expected Series-inplace behavior we need
|
| 1298 |
+
# `elem` to not have the same length as `arr`
|
| 1299 |
+
ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither")
|
| 1300 |
+
elem = element(ii2)
|
| 1301 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 1302 |
+
self.check_series_setitem(elem, ii, False)
|
| 1303 |
+
assert not blk._can_hold_element(elem)
|
| 1304 |
+
|
| 1305 |
+
ii3 = IntervalIndex.from_breaks([Timestamp(1), Timestamp(3), Timestamp(4)])
|
| 1306 |
+
elem = element(ii3)
|
| 1307 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 1308 |
+
self.check_series_setitem(elem, ii, False)
|
| 1309 |
+
assert not blk._can_hold_element(elem)
|
| 1310 |
+
|
| 1311 |
+
ii4 = IntervalIndex.from_breaks([Timedelta(1), Timedelta(3), Timedelta(4)])
|
| 1312 |
+
elem = element(ii4)
|
| 1313 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 1314 |
+
self.check_series_setitem(elem, ii, False)
|
| 1315 |
+
assert not blk._can_hold_element(elem)
|
| 1316 |
+
|
| 1317 |
+
def test_period_can_hold_element_emptylist(self):
|
| 1318 |
+
pi = period_range("2016", periods=3, freq="Y")
|
| 1319 |
+
blk = new_block(pi._data.reshape(1, 3), BlockPlacement([1]), ndim=2)
|
| 1320 |
+
|
| 1321 |
+
assert blk._can_hold_element([])
|
| 1322 |
+
|
| 1323 |
+
def test_period_can_hold_element(self, element):
|
| 1324 |
+
pi = period_range("2016", periods=3, freq="Y")
|
| 1325 |
+
|
| 1326 |
+
elem = element(pi)
|
| 1327 |
+
self.check_series_setitem(elem, pi, True)
|
| 1328 |
+
|
| 1329 |
+
# Careful: to get the expected Series-inplace behavior we need
|
| 1330 |
+
# `elem` to not have the same length as `arr`
|
| 1331 |
+
pi2 = pi.asfreq("D")[:-1]
|
| 1332 |
+
elem = element(pi2)
|
| 1333 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 1334 |
+
self.check_series_setitem(elem, pi, False)
|
| 1335 |
+
|
| 1336 |
+
dti = pi.to_timestamp("s")[:-1]
|
| 1337 |
+
elem = element(dti)
|
| 1338 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 1339 |
+
self.check_series_setitem(elem, pi, False)
|
| 1340 |
+
|
| 1341 |
+
def check_can_hold_element(self, obj, elem, inplace: bool):
|
| 1342 |
+
blk = obj._mgr.blocks[0]
|
| 1343 |
+
if inplace:
|
| 1344 |
+
assert blk._can_hold_element(elem)
|
| 1345 |
+
else:
|
| 1346 |
+
assert not blk._can_hold_element(elem)
|
| 1347 |
+
|
| 1348 |
+
def check_series_setitem(self, elem, index: Index, inplace: bool):
|
| 1349 |
+
arr = index._data.copy()
|
| 1350 |
+
ser = Series(arr, copy=False)
|
| 1351 |
+
|
| 1352 |
+
self.check_can_hold_element(ser, elem, inplace)
|
| 1353 |
+
|
| 1354 |
+
if is_scalar(elem):
|
| 1355 |
+
ser[0] = elem
|
| 1356 |
+
else:
|
| 1357 |
+
ser[: len(elem)] = elem
|
| 1358 |
+
|
| 1359 |
+
if inplace:
|
| 1360 |
+
assert ser.array is arr # i.e. setting was done inplace
|
| 1361 |
+
else:
|
| 1362 |
+
assert ser.dtype == object
|
| 1363 |
+
|
| 1364 |
+
|
| 1365 |
+
class TestShouldStore:
|
| 1366 |
+
def test_should_store_categorical(self):
|
| 1367 |
+
cat = Categorical(["A", "B", "C"])
|
| 1368 |
+
df = DataFrame(cat)
|
| 1369 |
+
blk = df._mgr.blocks[0]
|
| 1370 |
+
|
| 1371 |
+
# matching dtype
|
| 1372 |
+
assert blk.should_store(cat)
|
| 1373 |
+
assert blk.should_store(cat[:-1])
|
| 1374 |
+
|
| 1375 |
+
# different dtype
|
| 1376 |
+
assert not blk.should_store(cat.as_ordered())
|
| 1377 |
+
|
| 1378 |
+
# ndarray instead of Categorical
|
| 1379 |
+
assert not blk.should_store(np.asarray(cat))
|
| 1380 |
+
|
| 1381 |
+
|
| 1382 |
+
def test_validate_ndim():
|
| 1383 |
+
values = np.array([1.0, 2.0])
|
| 1384 |
+
placement = BlockPlacement(slice(2))
|
| 1385 |
+
msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
|
| 1386 |
+
|
| 1387 |
+
with pytest.raises(ValueError, match=msg):
|
| 1388 |
+
make_block(values, placement, ndim=2)
|
| 1389 |
+
|
| 1390 |
+
|
| 1391 |
+
def test_block_shape():
|
| 1392 |
+
idx = Index([0, 1, 2, 3, 4])
|
| 1393 |
+
a = Series([1, 2, 3]).reindex(idx)
|
| 1394 |
+
b = Series(Categorical([1, 2, 3])).reindex(idx)
|
| 1395 |
+
|
| 1396 |
+
assert a._mgr.blocks[0].mgr_locs.indexer == b._mgr.blocks[0].mgr_locs.indexer
|
| 1397 |
+
|
| 1398 |
+
|
| 1399 |
+
def test_make_block_no_pandas_array(block_maker):
|
| 1400 |
+
# https://github.com/pandas-dev/pandas/pull/24866
|
| 1401 |
+
arr = pd.arrays.NumpyExtensionArray(np.array([1, 2]))
|
| 1402 |
+
|
| 1403 |
+
# NumpyExtensionArray, no dtype
|
| 1404 |
+
result = block_maker(arr, BlockPlacement(slice(len(arr))), ndim=arr.ndim)
|
| 1405 |
+
assert result.dtype.kind in ["i", "u"]
|
| 1406 |
+
|
| 1407 |
+
if block_maker is make_block:
|
| 1408 |
+
# new_block requires caller to unwrap NumpyExtensionArray
|
| 1409 |
+
assert result.is_extension is False
|
| 1410 |
+
|
| 1411 |
+
# NumpyExtensionArray, NumpyEADtype
|
| 1412 |
+
result = block_maker(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim)
|
| 1413 |
+
assert result.dtype.kind in ["i", "u"]
|
| 1414 |
+
assert result.is_extension is False
|
| 1415 |
+
|
| 1416 |
+
# new_block no longer taked dtype keyword
|
| 1417 |
+
# ndarray, NumpyEADtype
|
| 1418 |
+
result = block_maker(
|
| 1419 |
+
arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim
|
| 1420 |
+
)
|
| 1421 |
+
assert result.dtype.kind in ["i", "u"]
|
| 1422 |
+
assert result.is_extension is False
|
llava_next/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Testing interaction between the different managers (BlockManager, ArrayManager)
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
import subprocess
|
| 6 |
+
import sys
|
| 7 |
+
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
from pandas.core.dtypes.missing import array_equivalent
|
| 11 |
+
|
| 12 |
+
import pandas as pd
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
from pandas.core.internals import (
|
| 15 |
+
ArrayManager,
|
| 16 |
+
BlockManager,
|
| 17 |
+
SingleArrayManager,
|
| 18 |
+
SingleBlockManager,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_dataframe_creation():
|
| 23 |
+
msg = "data_manager option is deprecated"
|
| 24 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 25 |
+
with pd.option_context("mode.data_manager", "block"):
|
| 26 |
+
df_block = pd.DataFrame(
|
| 27 |
+
{"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]}
|
| 28 |
+
)
|
| 29 |
+
assert isinstance(df_block._mgr, BlockManager)
|
| 30 |
+
|
| 31 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 32 |
+
with pd.option_context("mode.data_manager", "array"):
|
| 33 |
+
df_array = pd.DataFrame(
|
| 34 |
+
{"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]}
|
| 35 |
+
)
|
| 36 |
+
assert isinstance(df_array._mgr, ArrayManager)
|
| 37 |
+
|
| 38 |
+
# also ensure both are seen as equal
|
| 39 |
+
tm.assert_frame_equal(df_block, df_array)
|
| 40 |
+
|
| 41 |
+
# conversion from one manager to the other
|
| 42 |
+
result = df_block._as_manager("block")
|
| 43 |
+
assert isinstance(result._mgr, BlockManager)
|
| 44 |
+
result = df_block._as_manager("array")
|
| 45 |
+
assert isinstance(result._mgr, ArrayManager)
|
| 46 |
+
tm.assert_frame_equal(result, df_block)
|
| 47 |
+
assert all(
|
| 48 |
+
array_equivalent(left, right)
|
| 49 |
+
for left, right in zip(result._mgr.arrays, df_array._mgr.arrays)
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
result = df_array._as_manager("array")
|
| 53 |
+
assert isinstance(result._mgr, ArrayManager)
|
| 54 |
+
result = df_array._as_manager("block")
|
| 55 |
+
assert isinstance(result._mgr, BlockManager)
|
| 56 |
+
tm.assert_frame_equal(result, df_array)
|
| 57 |
+
assert len(result._mgr.blocks) == 2
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def test_series_creation():
|
| 61 |
+
msg = "data_manager option is deprecated"
|
| 62 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 63 |
+
with pd.option_context("mode.data_manager", "block"):
|
| 64 |
+
s_block = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"])
|
| 65 |
+
assert isinstance(s_block._mgr, SingleBlockManager)
|
| 66 |
+
|
| 67 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 68 |
+
with pd.option_context("mode.data_manager", "array"):
|
| 69 |
+
s_array = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"])
|
| 70 |
+
assert isinstance(s_array._mgr, SingleArrayManager)
|
| 71 |
+
|
| 72 |
+
# also ensure both are seen as equal
|
| 73 |
+
tm.assert_series_equal(s_block, s_array)
|
| 74 |
+
|
| 75 |
+
# conversion from one manager to the other
|
| 76 |
+
result = s_block._as_manager("block")
|
| 77 |
+
assert isinstance(result._mgr, SingleBlockManager)
|
| 78 |
+
result = s_block._as_manager("array")
|
| 79 |
+
assert isinstance(result._mgr, SingleArrayManager)
|
| 80 |
+
tm.assert_series_equal(result, s_block)
|
| 81 |
+
|
| 82 |
+
result = s_array._as_manager("array")
|
| 83 |
+
assert isinstance(result._mgr, SingleArrayManager)
|
| 84 |
+
result = s_array._as_manager("block")
|
| 85 |
+
assert isinstance(result._mgr, SingleBlockManager)
|
| 86 |
+
tm.assert_series_equal(result, s_array)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@pytest.mark.single_cpu
|
| 90 |
+
@pytest.mark.parametrize("manager", ["block", "array"])
|
| 91 |
+
def test_array_manager_depr_env_var(manager):
|
| 92 |
+
# GH#55043
|
| 93 |
+
test_env = os.environ.copy()
|
| 94 |
+
test_env["PANDAS_DATA_MANAGER"] = manager
|
| 95 |
+
response = subprocess.run(
|
| 96 |
+
[sys.executable, "-c", "import pandas"],
|
| 97 |
+
capture_output=True,
|
| 98 |
+
env=test_env,
|
| 99 |
+
check=True,
|
| 100 |
+
)
|
| 101 |
+
msg = "FutureWarning: The env variable PANDAS_DATA_MANAGER is set"
|
| 102 |
+
stderr_msg = response.stderr.decode("utf-8")
|
| 103 |
+
assert msg in stderr_msg, stderr_msg
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc
ADDED
|
Binary file (2.46 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py
ADDED
|
@@ -0,0 +1,753 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
Categorical,
|
| 7 |
+
DataFrame,
|
| 8 |
+
Index,
|
| 9 |
+
Series,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.fixture(
|
| 15 |
+
params=list(
|
| 16 |
+
{
|
| 17 |
+
"bool": [True, False, True],
|
| 18 |
+
"int64": [1, 2, 3],
|
| 19 |
+
"float64": [1.1, np.nan, 3.3],
|
| 20 |
+
"category": Categorical(["X", "Y", "Z"]),
|
| 21 |
+
"object": ["a", "b", "c"],
|
| 22 |
+
"datetime64[ns]": [
|
| 23 |
+
pd.Timestamp("2011-01-01"),
|
| 24 |
+
pd.Timestamp("2011-01-02"),
|
| 25 |
+
pd.Timestamp("2011-01-03"),
|
| 26 |
+
],
|
| 27 |
+
"datetime64[ns, US/Eastern]": [
|
| 28 |
+
pd.Timestamp("2011-01-01", tz="US/Eastern"),
|
| 29 |
+
pd.Timestamp("2011-01-02", tz="US/Eastern"),
|
| 30 |
+
pd.Timestamp("2011-01-03", tz="US/Eastern"),
|
| 31 |
+
],
|
| 32 |
+
"timedelta64[ns]": [
|
| 33 |
+
pd.Timedelta("1 days"),
|
| 34 |
+
pd.Timedelta("2 days"),
|
| 35 |
+
pd.Timedelta("3 days"),
|
| 36 |
+
],
|
| 37 |
+
"period[M]": [
|
| 38 |
+
pd.Period("2011-01", freq="M"),
|
| 39 |
+
pd.Period("2011-02", freq="M"),
|
| 40 |
+
pd.Period("2011-03", freq="M"),
|
| 41 |
+
],
|
| 42 |
+
}.items()
|
| 43 |
+
)
|
| 44 |
+
)
|
| 45 |
+
def item(request):
|
| 46 |
+
key, data = request.param
|
| 47 |
+
return key, data
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@pytest.fixture
|
| 51 |
+
def item2(item):
|
| 52 |
+
return item
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class TestConcatAppendCommon:
|
| 56 |
+
"""
|
| 57 |
+
Test common dtype coercion rules between concat and append.
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
def test_dtypes(self, item, index_or_series, using_infer_string):
|
| 61 |
+
# to confirm test case covers intended dtypes
|
| 62 |
+
typ, vals = item
|
| 63 |
+
obj = index_or_series(vals)
|
| 64 |
+
if typ == "object" and using_infer_string:
|
| 65 |
+
typ = "string"
|
| 66 |
+
if isinstance(obj, Index):
|
| 67 |
+
assert obj.dtype == typ
|
| 68 |
+
elif isinstance(obj, Series):
|
| 69 |
+
if typ.startswith("period"):
|
| 70 |
+
assert obj.dtype == "Period[M]"
|
| 71 |
+
else:
|
| 72 |
+
assert obj.dtype == typ
|
| 73 |
+
|
| 74 |
+
def test_concatlike_same_dtypes(self, item):
|
| 75 |
+
# GH 13660
|
| 76 |
+
typ1, vals1 = item
|
| 77 |
+
|
| 78 |
+
vals2 = vals1
|
| 79 |
+
vals3 = vals1
|
| 80 |
+
|
| 81 |
+
if typ1 == "category":
|
| 82 |
+
exp_data = Categorical(list(vals1) + list(vals2))
|
| 83 |
+
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
|
| 84 |
+
else:
|
| 85 |
+
exp_data = vals1 + vals2
|
| 86 |
+
exp_data3 = vals1 + vals2 + vals3
|
| 87 |
+
|
| 88 |
+
# ----- Index ----- #
|
| 89 |
+
|
| 90 |
+
# index.append
|
| 91 |
+
res = Index(vals1).append(Index(vals2))
|
| 92 |
+
exp = Index(exp_data)
|
| 93 |
+
tm.assert_index_equal(res, exp)
|
| 94 |
+
|
| 95 |
+
# 3 elements
|
| 96 |
+
res = Index(vals1).append([Index(vals2), Index(vals3)])
|
| 97 |
+
exp = Index(exp_data3)
|
| 98 |
+
tm.assert_index_equal(res, exp)
|
| 99 |
+
|
| 100 |
+
# index.append name mismatch
|
| 101 |
+
i1 = Index(vals1, name="x")
|
| 102 |
+
i2 = Index(vals2, name="y")
|
| 103 |
+
res = i1.append(i2)
|
| 104 |
+
exp = Index(exp_data)
|
| 105 |
+
tm.assert_index_equal(res, exp)
|
| 106 |
+
|
| 107 |
+
# index.append name match
|
| 108 |
+
i1 = Index(vals1, name="x")
|
| 109 |
+
i2 = Index(vals2, name="x")
|
| 110 |
+
res = i1.append(i2)
|
| 111 |
+
exp = Index(exp_data, name="x")
|
| 112 |
+
tm.assert_index_equal(res, exp)
|
| 113 |
+
|
| 114 |
+
# cannot append non-index
|
| 115 |
+
with pytest.raises(TypeError, match="all inputs must be Index"):
|
| 116 |
+
Index(vals1).append(vals2)
|
| 117 |
+
|
| 118 |
+
with pytest.raises(TypeError, match="all inputs must be Index"):
|
| 119 |
+
Index(vals1).append([Index(vals2), vals3])
|
| 120 |
+
|
| 121 |
+
# ----- Series ----- #
|
| 122 |
+
|
| 123 |
+
# series.append
|
| 124 |
+
res = Series(vals1)._append(Series(vals2), ignore_index=True)
|
| 125 |
+
exp = Series(exp_data)
|
| 126 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 127 |
+
|
| 128 |
+
# concat
|
| 129 |
+
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
|
| 130 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 131 |
+
|
| 132 |
+
# 3 elements
|
| 133 |
+
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
|
| 134 |
+
exp = Series(exp_data3)
|
| 135 |
+
tm.assert_series_equal(res, exp)
|
| 136 |
+
|
| 137 |
+
res = pd.concat(
|
| 138 |
+
[Series(vals1), Series(vals2), Series(vals3)],
|
| 139 |
+
ignore_index=True,
|
| 140 |
+
)
|
| 141 |
+
tm.assert_series_equal(res, exp)
|
| 142 |
+
|
| 143 |
+
# name mismatch
|
| 144 |
+
s1 = Series(vals1, name="x")
|
| 145 |
+
s2 = Series(vals2, name="y")
|
| 146 |
+
res = s1._append(s2, ignore_index=True)
|
| 147 |
+
exp = Series(exp_data)
|
| 148 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 149 |
+
|
| 150 |
+
res = pd.concat([s1, s2], ignore_index=True)
|
| 151 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 152 |
+
|
| 153 |
+
# name match
|
| 154 |
+
s1 = Series(vals1, name="x")
|
| 155 |
+
s2 = Series(vals2, name="x")
|
| 156 |
+
res = s1._append(s2, ignore_index=True)
|
| 157 |
+
exp = Series(exp_data, name="x")
|
| 158 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 159 |
+
|
| 160 |
+
res = pd.concat([s1, s2], ignore_index=True)
|
| 161 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 162 |
+
|
| 163 |
+
# cannot append non-index
|
| 164 |
+
msg = (
|
| 165 |
+
r"cannot concatenate object of type '.+'; "
|
| 166 |
+
"only Series and DataFrame objs are valid"
|
| 167 |
+
)
|
| 168 |
+
with pytest.raises(TypeError, match=msg):
|
| 169 |
+
Series(vals1)._append(vals2)
|
| 170 |
+
|
| 171 |
+
with pytest.raises(TypeError, match=msg):
|
| 172 |
+
Series(vals1)._append([Series(vals2), vals3])
|
| 173 |
+
|
| 174 |
+
with pytest.raises(TypeError, match=msg):
|
| 175 |
+
pd.concat([Series(vals1), vals2])
|
| 176 |
+
|
| 177 |
+
with pytest.raises(TypeError, match=msg):
|
| 178 |
+
pd.concat([Series(vals1), Series(vals2), vals3])
|
| 179 |
+
|
| 180 |
+
def test_concatlike_dtypes_coercion(self, item, item2, request):
|
| 181 |
+
# GH 13660
|
| 182 |
+
typ1, vals1 = item
|
| 183 |
+
typ2, vals2 = item2
|
| 184 |
+
|
| 185 |
+
vals3 = vals2
|
| 186 |
+
|
| 187 |
+
# basically infer
|
| 188 |
+
exp_index_dtype = None
|
| 189 |
+
exp_series_dtype = None
|
| 190 |
+
|
| 191 |
+
if typ1 == typ2:
|
| 192 |
+
pytest.skip("same dtype is tested in test_concatlike_same_dtypes")
|
| 193 |
+
elif typ1 == "category" or typ2 == "category":
|
| 194 |
+
pytest.skip("categorical type tested elsewhere")
|
| 195 |
+
|
| 196 |
+
# specify expected dtype
|
| 197 |
+
if typ1 == "bool" and typ2 in ("int64", "float64"):
|
| 198 |
+
# series coerces to numeric based on numpy rule
|
| 199 |
+
# index doesn't because bool is object dtype
|
| 200 |
+
exp_series_dtype = typ2
|
| 201 |
+
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
|
| 202 |
+
request.applymarker(mark)
|
| 203 |
+
elif typ2 == "bool" and typ1 in ("int64", "float64"):
|
| 204 |
+
exp_series_dtype = typ1
|
| 205 |
+
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
|
| 206 |
+
request.applymarker(mark)
|
| 207 |
+
elif typ1 in {"datetime64[ns, US/Eastern]", "timedelta64[ns]"} or typ2 in {
|
| 208 |
+
"datetime64[ns, US/Eastern]",
|
| 209 |
+
"timedelta64[ns]",
|
| 210 |
+
}:
|
| 211 |
+
exp_index_dtype = object
|
| 212 |
+
exp_series_dtype = object
|
| 213 |
+
|
| 214 |
+
exp_data = vals1 + vals2
|
| 215 |
+
exp_data3 = vals1 + vals2 + vals3
|
| 216 |
+
|
| 217 |
+
# ----- Index ----- #
|
| 218 |
+
|
| 219 |
+
# index.append
|
| 220 |
+
# GH#39817
|
| 221 |
+
res = Index(vals1).append(Index(vals2))
|
| 222 |
+
exp = Index(exp_data, dtype=exp_index_dtype)
|
| 223 |
+
tm.assert_index_equal(res, exp)
|
| 224 |
+
|
| 225 |
+
# 3 elements
|
| 226 |
+
res = Index(vals1).append([Index(vals2), Index(vals3)])
|
| 227 |
+
exp = Index(exp_data3, dtype=exp_index_dtype)
|
| 228 |
+
tm.assert_index_equal(res, exp)
|
| 229 |
+
|
| 230 |
+
# ----- Series ----- #
|
| 231 |
+
|
| 232 |
+
# series._append
|
| 233 |
+
# GH#39817
|
| 234 |
+
res = Series(vals1)._append(Series(vals2), ignore_index=True)
|
| 235 |
+
exp = Series(exp_data, dtype=exp_series_dtype)
|
| 236 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 237 |
+
|
| 238 |
+
# concat
|
| 239 |
+
# GH#39817
|
| 240 |
+
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
|
| 241 |
+
tm.assert_series_equal(res, exp, check_index_type=True)
|
| 242 |
+
|
| 243 |
+
# 3 elements
|
| 244 |
+
# GH#39817
|
| 245 |
+
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
|
| 246 |
+
exp = Series(exp_data3, dtype=exp_series_dtype)
|
| 247 |
+
tm.assert_series_equal(res, exp)
|
| 248 |
+
|
| 249 |
+
# GH#39817
|
| 250 |
+
res = pd.concat(
|
| 251 |
+
[Series(vals1), Series(vals2), Series(vals3)],
|
| 252 |
+
ignore_index=True,
|
| 253 |
+
)
|
| 254 |
+
tm.assert_series_equal(res, exp)
|
| 255 |
+
|
| 256 |
+
def test_concatlike_common_coerce_to_pandas_object(self):
|
| 257 |
+
# GH 13626
|
| 258 |
+
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
|
| 259 |
+
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
|
| 260 |
+
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
|
| 261 |
+
|
| 262 |
+
exp = Index(
|
| 263 |
+
[
|
| 264 |
+
pd.Timestamp("2011-01-01"),
|
| 265 |
+
pd.Timestamp("2011-01-02"),
|
| 266 |
+
pd.Timedelta("1 days"),
|
| 267 |
+
pd.Timedelta("2 days"),
|
| 268 |
+
]
|
| 269 |
+
)
|
| 270 |
+
|
| 271 |
+
res = dti.append(tdi)
|
| 272 |
+
tm.assert_index_equal(res, exp)
|
| 273 |
+
assert isinstance(res[0], pd.Timestamp)
|
| 274 |
+
assert isinstance(res[-1], pd.Timedelta)
|
| 275 |
+
|
| 276 |
+
dts = Series(dti)
|
| 277 |
+
tds = Series(tdi)
|
| 278 |
+
res = dts._append(tds)
|
| 279 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 280 |
+
assert isinstance(res.iloc[0], pd.Timestamp)
|
| 281 |
+
assert isinstance(res.iloc[-1], pd.Timedelta)
|
| 282 |
+
|
| 283 |
+
res = pd.concat([dts, tds])
|
| 284 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 285 |
+
assert isinstance(res.iloc[0], pd.Timestamp)
|
| 286 |
+
assert isinstance(res.iloc[-1], pd.Timedelta)
|
| 287 |
+
|
| 288 |
+
def test_concatlike_datetimetz(self, tz_aware_fixture):
|
| 289 |
+
tz = tz_aware_fixture
|
| 290 |
+
# GH 7795
|
| 291 |
+
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
|
| 292 |
+
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
|
| 293 |
+
|
| 294 |
+
exp = pd.DatetimeIndex(
|
| 295 |
+
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
+
res = dti1.append(dti2)
|
| 299 |
+
tm.assert_index_equal(res, exp)
|
| 300 |
+
|
| 301 |
+
dts1 = Series(dti1)
|
| 302 |
+
dts2 = Series(dti2)
|
| 303 |
+
res = dts1._append(dts2)
|
| 304 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 305 |
+
|
| 306 |
+
res = pd.concat([dts1, dts2])
|
| 307 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 308 |
+
|
| 309 |
+
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
|
| 310 |
+
def test_concatlike_datetimetz_short(self, tz):
|
| 311 |
+
# GH#7795
|
| 312 |
+
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
|
| 313 |
+
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
|
| 314 |
+
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
|
| 315 |
+
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
|
| 316 |
+
|
| 317 |
+
exp_idx = pd.DatetimeIndex(
|
| 318 |
+
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
|
| 319 |
+
tz=tz,
|
| 320 |
+
).as_unit("ns")
|
| 321 |
+
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
|
| 322 |
+
|
| 323 |
+
tm.assert_frame_equal(df1._append(df2), exp)
|
| 324 |
+
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
|
| 325 |
+
|
| 326 |
+
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
|
| 327 |
+
tz = tz_aware_fixture
|
| 328 |
+
# GH 13660
|
| 329 |
+
|
| 330 |
+
# different tz coerces to object
|
| 331 |
+
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
|
| 332 |
+
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
|
| 333 |
+
|
| 334 |
+
exp = Index(
|
| 335 |
+
[
|
| 336 |
+
pd.Timestamp("2011-01-01", tz=tz),
|
| 337 |
+
pd.Timestamp("2011-01-02", tz=tz),
|
| 338 |
+
pd.Timestamp("2012-01-01"),
|
| 339 |
+
pd.Timestamp("2012-01-02"),
|
| 340 |
+
],
|
| 341 |
+
dtype=object,
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
res = dti1.append(dti2)
|
| 345 |
+
tm.assert_index_equal(res, exp)
|
| 346 |
+
|
| 347 |
+
dts1 = Series(dti1)
|
| 348 |
+
dts2 = Series(dti2)
|
| 349 |
+
res = dts1._append(dts2)
|
| 350 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 351 |
+
|
| 352 |
+
res = pd.concat([dts1, dts2])
|
| 353 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 354 |
+
|
| 355 |
+
# different tz
|
| 356 |
+
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
|
| 357 |
+
|
| 358 |
+
exp = Index(
|
| 359 |
+
[
|
| 360 |
+
pd.Timestamp("2011-01-01", tz=tz),
|
| 361 |
+
pd.Timestamp("2011-01-02", tz=tz),
|
| 362 |
+
pd.Timestamp("2012-01-01", tz="US/Pacific"),
|
| 363 |
+
pd.Timestamp("2012-01-02", tz="US/Pacific"),
|
| 364 |
+
],
|
| 365 |
+
dtype=object,
|
| 366 |
+
)
|
| 367 |
+
|
| 368 |
+
res = dti1.append(dti3)
|
| 369 |
+
tm.assert_index_equal(res, exp)
|
| 370 |
+
|
| 371 |
+
dts1 = Series(dti1)
|
| 372 |
+
dts3 = Series(dti3)
|
| 373 |
+
res = dts1._append(dts3)
|
| 374 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 375 |
+
|
| 376 |
+
res = pd.concat([dts1, dts3])
|
| 377 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 378 |
+
|
| 379 |
+
def test_concatlike_common_period(self):
|
| 380 |
+
# GH 13660
|
| 381 |
+
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
|
| 382 |
+
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
|
| 383 |
+
|
| 384 |
+
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
|
| 385 |
+
|
| 386 |
+
res = pi1.append(pi2)
|
| 387 |
+
tm.assert_index_equal(res, exp)
|
| 388 |
+
|
| 389 |
+
ps1 = Series(pi1)
|
| 390 |
+
ps2 = Series(pi2)
|
| 391 |
+
res = ps1._append(ps2)
|
| 392 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 393 |
+
|
| 394 |
+
res = pd.concat([ps1, ps2])
|
| 395 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 396 |
+
|
| 397 |
+
def test_concatlike_common_period_diff_freq_to_object(self):
|
| 398 |
+
# GH 13221
|
| 399 |
+
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
|
| 400 |
+
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
|
| 401 |
+
|
| 402 |
+
exp = Index(
|
| 403 |
+
[
|
| 404 |
+
pd.Period("2011-01", freq="M"),
|
| 405 |
+
pd.Period("2011-02", freq="M"),
|
| 406 |
+
pd.Period("2012-01-01", freq="D"),
|
| 407 |
+
pd.Period("2012-02-01", freq="D"),
|
| 408 |
+
],
|
| 409 |
+
dtype=object,
|
| 410 |
+
)
|
| 411 |
+
|
| 412 |
+
res = pi1.append(pi2)
|
| 413 |
+
tm.assert_index_equal(res, exp)
|
| 414 |
+
|
| 415 |
+
ps1 = Series(pi1)
|
| 416 |
+
ps2 = Series(pi2)
|
| 417 |
+
res = ps1._append(ps2)
|
| 418 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 419 |
+
|
| 420 |
+
res = pd.concat([ps1, ps2])
|
| 421 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 422 |
+
|
| 423 |
+
def test_concatlike_common_period_mixed_dt_to_object(self):
|
| 424 |
+
# GH 13221
|
| 425 |
+
# different datetimelike
|
| 426 |
+
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
|
| 427 |
+
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
|
| 428 |
+
exp = Index(
|
| 429 |
+
[
|
| 430 |
+
pd.Period("2011-01", freq="M"),
|
| 431 |
+
pd.Period("2011-02", freq="M"),
|
| 432 |
+
pd.Timedelta("1 days"),
|
| 433 |
+
pd.Timedelta("2 days"),
|
| 434 |
+
],
|
| 435 |
+
dtype=object,
|
| 436 |
+
)
|
| 437 |
+
|
| 438 |
+
res = pi1.append(tdi)
|
| 439 |
+
tm.assert_index_equal(res, exp)
|
| 440 |
+
|
| 441 |
+
ps1 = Series(pi1)
|
| 442 |
+
tds = Series(tdi)
|
| 443 |
+
res = ps1._append(tds)
|
| 444 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 445 |
+
|
| 446 |
+
res = pd.concat([ps1, tds])
|
| 447 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 448 |
+
|
| 449 |
+
# inverse
|
| 450 |
+
exp = Index(
|
| 451 |
+
[
|
| 452 |
+
pd.Timedelta("1 days"),
|
| 453 |
+
pd.Timedelta("2 days"),
|
| 454 |
+
pd.Period("2011-01", freq="M"),
|
| 455 |
+
pd.Period("2011-02", freq="M"),
|
| 456 |
+
],
|
| 457 |
+
dtype=object,
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
res = tdi.append(pi1)
|
| 461 |
+
tm.assert_index_equal(res, exp)
|
| 462 |
+
|
| 463 |
+
ps1 = Series(pi1)
|
| 464 |
+
tds = Series(tdi)
|
| 465 |
+
res = tds._append(ps1)
|
| 466 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 467 |
+
|
| 468 |
+
res = pd.concat([tds, ps1])
|
| 469 |
+
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
|
| 470 |
+
|
| 471 |
+
def test_concat_categorical(self):
|
| 472 |
+
# GH 13524
|
| 473 |
+
|
| 474 |
+
# same categories -> category
|
| 475 |
+
s1 = Series([1, 2, np.nan], dtype="category")
|
| 476 |
+
s2 = Series([2, 1, 2], dtype="category")
|
| 477 |
+
|
| 478 |
+
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
|
| 479 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 480 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 481 |
+
|
| 482 |
+
# partially different categories => not-category
|
| 483 |
+
s1 = Series([3, 2], dtype="category")
|
| 484 |
+
s2 = Series([2, 1], dtype="category")
|
| 485 |
+
|
| 486 |
+
exp = Series([3, 2, 2, 1])
|
| 487 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 488 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 489 |
+
|
| 490 |
+
# completely different categories (same dtype) => not-category
|
| 491 |
+
s1 = Series([10, 11, np.nan], dtype="category")
|
| 492 |
+
s2 = Series([np.nan, 1, 3, 2], dtype="category")
|
| 493 |
+
|
| 494 |
+
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
|
| 495 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 496 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 497 |
+
|
| 498 |
+
def test_union_categorical_same_categories_different_order(self):
|
| 499 |
+
# https://github.com/pandas-dev/pandas/issues/19096
|
| 500 |
+
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
|
| 501 |
+
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
|
| 502 |
+
result = pd.concat([a, b], ignore_index=True)
|
| 503 |
+
expected = Series(
|
| 504 |
+
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
|
| 505 |
+
)
|
| 506 |
+
tm.assert_series_equal(result, expected)
|
| 507 |
+
|
| 508 |
+
def test_concat_categorical_coercion(self):
|
| 509 |
+
# GH 13524
|
| 510 |
+
|
| 511 |
+
# category + not-category => not-category
|
| 512 |
+
s1 = Series([1, 2, np.nan], dtype="category")
|
| 513 |
+
s2 = Series([2, 1, 2])
|
| 514 |
+
|
| 515 |
+
exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
|
| 516 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 517 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 518 |
+
|
| 519 |
+
# result shouldn't be affected by 1st elem dtype
|
| 520 |
+
exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
|
| 521 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 522 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 523 |
+
|
| 524 |
+
# all values are not in category => not-category
|
| 525 |
+
s1 = Series([3, 2], dtype="category")
|
| 526 |
+
s2 = Series([2, 1])
|
| 527 |
+
|
| 528 |
+
exp = Series([3, 2, 2, 1])
|
| 529 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 530 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 531 |
+
|
| 532 |
+
exp = Series([2, 1, 3, 2])
|
| 533 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 534 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 535 |
+
|
| 536 |
+
# completely different categories => not-category
|
| 537 |
+
s1 = Series([10, 11, np.nan], dtype="category")
|
| 538 |
+
s2 = Series([1, 3, 2])
|
| 539 |
+
|
| 540 |
+
exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
|
| 541 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 542 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 543 |
+
|
| 544 |
+
exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
|
| 545 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 546 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 547 |
+
|
| 548 |
+
# different dtype => not-category
|
| 549 |
+
s1 = Series([10, 11, np.nan], dtype="category")
|
| 550 |
+
s2 = Series(["a", "b", "c"])
|
| 551 |
+
|
| 552 |
+
exp = Series([10, 11, np.nan, "a", "b", "c"])
|
| 553 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 554 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 555 |
+
|
| 556 |
+
exp = Series(["a", "b", "c", 10, 11, np.nan])
|
| 557 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 558 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 559 |
+
|
| 560 |
+
# if normal series only contains NaN-likes => not-category
|
| 561 |
+
s1 = Series([10, 11], dtype="category")
|
| 562 |
+
s2 = Series([np.nan, np.nan, np.nan])
|
| 563 |
+
|
| 564 |
+
exp = Series([10, 11, np.nan, np.nan, np.nan])
|
| 565 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 566 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 567 |
+
|
| 568 |
+
exp = Series([np.nan, np.nan, np.nan, 10, 11])
|
| 569 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 570 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 571 |
+
|
| 572 |
+
def test_concat_categorical_3elem_coercion(self):
|
| 573 |
+
# GH 13524
|
| 574 |
+
|
| 575 |
+
# mixed dtypes => not-category
|
| 576 |
+
s1 = Series([1, 2, np.nan], dtype="category")
|
| 577 |
+
s2 = Series([2, 1, 2], dtype="category")
|
| 578 |
+
s3 = Series([1, 2, 1, 2, np.nan])
|
| 579 |
+
|
| 580 |
+
exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
|
| 581 |
+
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
|
| 582 |
+
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
|
| 583 |
+
|
| 584 |
+
exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
|
| 585 |
+
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
|
| 586 |
+
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
|
| 587 |
+
|
| 588 |
+
# values are all in either category => not-category
|
| 589 |
+
s1 = Series([4, 5, 6], dtype="category")
|
| 590 |
+
s2 = Series([1, 2, 3], dtype="category")
|
| 591 |
+
s3 = Series([1, 3, 4])
|
| 592 |
+
|
| 593 |
+
exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
|
| 594 |
+
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
|
| 595 |
+
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
|
| 596 |
+
|
| 597 |
+
exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
|
| 598 |
+
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
|
| 599 |
+
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
|
| 600 |
+
|
| 601 |
+
# values are all in either category => not-category
|
| 602 |
+
s1 = Series([4, 5, 6], dtype="category")
|
| 603 |
+
s2 = Series([1, 2, 3], dtype="category")
|
| 604 |
+
s3 = Series([10, 11, 12])
|
| 605 |
+
|
| 606 |
+
exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
|
| 607 |
+
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
|
| 608 |
+
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
|
| 609 |
+
|
| 610 |
+
exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
|
| 611 |
+
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
|
| 612 |
+
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
|
| 613 |
+
|
| 614 |
+
def test_concat_categorical_multi_coercion(self):
|
| 615 |
+
# GH 13524
|
| 616 |
+
|
| 617 |
+
s1 = Series([1, 3], dtype="category")
|
| 618 |
+
s2 = Series([3, 4], dtype="category")
|
| 619 |
+
s3 = Series([2, 3])
|
| 620 |
+
s4 = Series([2, 2], dtype="category")
|
| 621 |
+
s5 = Series([1, np.nan])
|
| 622 |
+
s6 = Series([1, 3, 2], dtype="category")
|
| 623 |
+
|
| 624 |
+
# mixed dtype, values are all in categories => not-category
|
| 625 |
+
exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
|
| 626 |
+
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
|
| 627 |
+
tm.assert_series_equal(res, exp)
|
| 628 |
+
res = s1._append([s2, s3, s4, s5, s6], ignore_index=True)
|
| 629 |
+
tm.assert_series_equal(res, exp)
|
| 630 |
+
|
| 631 |
+
exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
|
| 632 |
+
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
|
| 633 |
+
tm.assert_series_equal(res, exp)
|
| 634 |
+
res = s6._append([s5, s4, s3, s2, s1], ignore_index=True)
|
| 635 |
+
tm.assert_series_equal(res, exp)
|
| 636 |
+
|
| 637 |
+
def test_concat_categorical_ordered(self):
|
| 638 |
+
# GH 13524
|
| 639 |
+
|
| 640 |
+
s1 = Series(Categorical([1, 2, np.nan], ordered=True))
|
| 641 |
+
s2 = Series(Categorical([2, 1, 2], ordered=True))
|
| 642 |
+
|
| 643 |
+
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
|
| 644 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 645 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 646 |
+
|
| 647 |
+
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True))
|
| 648 |
+
tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
|
| 649 |
+
tm.assert_series_equal(s1._append([s2, s1], ignore_index=True), exp)
|
| 650 |
+
|
| 651 |
+
def test_concat_categorical_coercion_nan(self):
|
| 652 |
+
# GH 13524
|
| 653 |
+
|
| 654 |
+
# some edge cases
|
| 655 |
+
# category + not-category => not category
|
| 656 |
+
s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
|
| 657 |
+
s2 = Series([np.nan, 1])
|
| 658 |
+
|
| 659 |
+
exp = Series([np.nan, np.nan, np.nan, 1])
|
| 660 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 661 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 662 |
+
|
| 663 |
+
s1 = Series([1, np.nan], dtype="category")
|
| 664 |
+
s2 = Series([np.nan, np.nan])
|
| 665 |
+
|
| 666 |
+
exp = Series([1, np.nan, np.nan, np.nan], dtype="float")
|
| 667 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 668 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 669 |
+
|
| 670 |
+
# mixed dtype, all nan-likes => not-category
|
| 671 |
+
s1 = Series([np.nan, np.nan], dtype="category")
|
| 672 |
+
s2 = Series([np.nan, np.nan])
|
| 673 |
+
|
| 674 |
+
exp = Series([np.nan, np.nan, np.nan, np.nan])
|
| 675 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 676 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 677 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 678 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 679 |
+
|
| 680 |
+
# all category nan-likes => category
|
| 681 |
+
s1 = Series([np.nan, np.nan], dtype="category")
|
| 682 |
+
s2 = Series([np.nan, np.nan], dtype="category")
|
| 683 |
+
|
| 684 |
+
exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
|
| 685 |
+
|
| 686 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 687 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 688 |
+
|
| 689 |
+
def test_concat_categorical_empty(self):
|
| 690 |
+
# GH 13524
|
| 691 |
+
|
| 692 |
+
s1 = Series([], dtype="category")
|
| 693 |
+
s2 = Series([1, 2], dtype="category")
|
| 694 |
+
|
| 695 |
+
msg = "The behavior of array concatenation with empty entries is deprecated"
|
| 696 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 697 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
|
| 698 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
|
| 699 |
+
|
| 700 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 701 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
|
| 702 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), s2)
|
| 703 |
+
|
| 704 |
+
s1 = Series([], dtype="category")
|
| 705 |
+
s2 = Series([], dtype="category")
|
| 706 |
+
|
| 707 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
|
| 708 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
|
| 709 |
+
|
| 710 |
+
s1 = Series([], dtype="category")
|
| 711 |
+
s2 = Series([], dtype="object")
|
| 712 |
+
|
| 713 |
+
# different dtype => not-category
|
| 714 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
|
| 715 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
|
| 716 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
|
| 717 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), s2)
|
| 718 |
+
|
| 719 |
+
s1 = Series([], dtype="category")
|
| 720 |
+
s2 = Series([np.nan, np.nan])
|
| 721 |
+
|
| 722 |
+
# empty Series is ignored
|
| 723 |
+
exp = Series([np.nan, np.nan])
|
| 724 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 725 |
+
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
|
| 726 |
+
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
|
| 727 |
+
|
| 728 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 729 |
+
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
|
| 730 |
+
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
|
| 731 |
+
|
| 732 |
+
def test_categorical_concat_append(self):
|
| 733 |
+
cat = Categorical(["a", "b"], categories=["a", "b"])
|
| 734 |
+
vals = [1, 2]
|
| 735 |
+
df = DataFrame({"cats": cat, "vals": vals})
|
| 736 |
+
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
|
| 737 |
+
vals2 = [1, 2, 1, 2]
|
| 738 |
+
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
|
| 739 |
+
|
| 740 |
+
tm.assert_frame_equal(pd.concat([df, df]), exp)
|
| 741 |
+
tm.assert_frame_equal(df._append(df), exp)
|
| 742 |
+
|
| 743 |
+
# GH 13524 can concat different categories
|
| 744 |
+
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
|
| 745 |
+
vals3 = [1, 2]
|
| 746 |
+
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
|
| 747 |
+
|
| 748 |
+
res = pd.concat([df, df_different_categories], ignore_index=True)
|
| 749 |
+
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
|
| 750 |
+
tm.assert_frame_equal(res, exp)
|
| 751 |
+
|
| 752 |
+
res = df._append(df_different_categories, ignore_index=True)
|
| 753 |
+
tm.assert_frame_equal(res, exp)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py
ADDED
|
@@ -0,0 +1,606 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime as dt
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
|
| 4 |
+
import dateutil
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
DatetimeIndex,
|
| 12 |
+
Index,
|
| 13 |
+
MultiIndex,
|
| 14 |
+
Series,
|
| 15 |
+
Timestamp,
|
| 16 |
+
concat,
|
| 17 |
+
date_range,
|
| 18 |
+
to_timedelta,
|
| 19 |
+
)
|
| 20 |
+
import pandas._testing as tm
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TestDatetimeConcat:
|
| 24 |
+
def test_concat_datetime64_block(self):
|
| 25 |
+
rng = date_range("1/1/2000", periods=10)
|
| 26 |
+
|
| 27 |
+
df = DataFrame({"time": rng})
|
| 28 |
+
|
| 29 |
+
result = concat([df, df])
|
| 30 |
+
assert (result.iloc[:10]["time"] == rng).all()
|
| 31 |
+
assert (result.iloc[10:]["time"] == rng).all()
|
| 32 |
+
|
| 33 |
+
def test_concat_datetime_datetime64_frame(self):
|
| 34 |
+
# GH#2624
|
| 35 |
+
rows = []
|
| 36 |
+
rows.append([datetime(2010, 1, 1), 1])
|
| 37 |
+
rows.append([datetime(2010, 1, 2), "hi"])
|
| 38 |
+
|
| 39 |
+
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
|
| 40 |
+
|
| 41 |
+
ind = date_range(start="2000/1/1", freq="D", periods=10)
|
| 42 |
+
df1 = DataFrame({"date": ind, "test": range(10)})
|
| 43 |
+
|
| 44 |
+
# it works!
|
| 45 |
+
concat([df1, df2_obj])
|
| 46 |
+
|
| 47 |
+
def test_concat_datetime_timezone(self):
|
| 48 |
+
# GH 18523
|
| 49 |
+
idx1 = date_range("2011-01-01", periods=3, freq="h", tz="Europe/Paris")
|
| 50 |
+
idx2 = date_range(start=idx1[0], end=idx1[-1], freq="h")
|
| 51 |
+
df1 = DataFrame({"a": [1, 2, 3]}, index=idx1)
|
| 52 |
+
df2 = DataFrame({"b": [1, 2, 3]}, index=idx2)
|
| 53 |
+
result = concat([df1, df2], axis=1)
|
| 54 |
+
|
| 55 |
+
exp_idx = DatetimeIndex(
|
| 56 |
+
[
|
| 57 |
+
"2011-01-01 00:00:00+01:00",
|
| 58 |
+
"2011-01-01 01:00:00+01:00",
|
| 59 |
+
"2011-01-01 02:00:00+01:00",
|
| 60 |
+
],
|
| 61 |
+
dtype="M8[ns, Europe/Paris]",
|
| 62 |
+
freq="h",
|
| 63 |
+
)
|
| 64 |
+
expected = DataFrame(
|
| 65 |
+
[[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
tm.assert_frame_equal(result, expected)
|
| 69 |
+
|
| 70 |
+
idx3 = date_range("2011-01-01", periods=3, freq="h", tz="Asia/Tokyo")
|
| 71 |
+
df3 = DataFrame({"b": [1, 2, 3]}, index=idx3)
|
| 72 |
+
result = concat([df1, df3], axis=1)
|
| 73 |
+
|
| 74 |
+
exp_idx = DatetimeIndex(
|
| 75 |
+
[
|
| 76 |
+
"2010-12-31 15:00:00+00:00",
|
| 77 |
+
"2010-12-31 16:00:00+00:00",
|
| 78 |
+
"2010-12-31 17:00:00+00:00",
|
| 79 |
+
"2010-12-31 23:00:00+00:00",
|
| 80 |
+
"2011-01-01 00:00:00+00:00",
|
| 81 |
+
"2011-01-01 01:00:00+00:00",
|
| 82 |
+
]
|
| 83 |
+
).as_unit("ns")
|
| 84 |
+
|
| 85 |
+
expected = DataFrame(
|
| 86 |
+
[
|
| 87 |
+
[np.nan, 1],
|
| 88 |
+
[np.nan, 2],
|
| 89 |
+
[np.nan, 3],
|
| 90 |
+
[1, np.nan],
|
| 91 |
+
[2, np.nan],
|
| 92 |
+
[3, np.nan],
|
| 93 |
+
],
|
| 94 |
+
index=exp_idx,
|
| 95 |
+
columns=["a", "b"],
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
tm.assert_frame_equal(result, expected)
|
| 99 |
+
|
| 100 |
+
# GH 13783: Concat after resample
|
| 101 |
+
result = concat([df1.resample("h").mean(), df2.resample("h").mean()], sort=True)
|
| 102 |
+
expected = DataFrame(
|
| 103 |
+
{"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]},
|
| 104 |
+
index=idx1.append(idx1),
|
| 105 |
+
)
|
| 106 |
+
tm.assert_frame_equal(result, expected)
|
| 107 |
+
|
| 108 |
+
def test_concat_datetimeindex_freq(self):
|
| 109 |
+
# GH 3232
|
| 110 |
+
# Monotonic index result
|
| 111 |
+
dr = date_range("01-Jan-2013", periods=100, freq="50ms", tz="UTC")
|
| 112 |
+
data = list(range(100))
|
| 113 |
+
expected = DataFrame(data, index=dr)
|
| 114 |
+
result = concat([expected[:50], expected[50:]])
|
| 115 |
+
tm.assert_frame_equal(result, expected)
|
| 116 |
+
|
| 117 |
+
# Non-monotonic index result
|
| 118 |
+
result = concat([expected[50:], expected[:50]])
|
| 119 |
+
expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))
|
| 120 |
+
expected.index._data.freq = None
|
| 121 |
+
tm.assert_frame_equal(result, expected)
|
| 122 |
+
|
| 123 |
+
def test_concat_multiindex_datetime_object_index(self):
|
| 124 |
+
# https://github.com/pandas-dev/pandas/issues/11058
|
| 125 |
+
idx = Index(
|
| 126 |
+
[dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)],
|
| 127 |
+
dtype="object",
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
s = Series(
|
| 131 |
+
["a", "b"],
|
| 132 |
+
index=MultiIndex.from_arrays(
|
| 133 |
+
[
|
| 134 |
+
[1, 2],
|
| 135 |
+
idx[:-1],
|
| 136 |
+
],
|
| 137 |
+
names=["first", "second"],
|
| 138 |
+
),
|
| 139 |
+
)
|
| 140 |
+
s2 = Series(
|
| 141 |
+
["a", "b"],
|
| 142 |
+
index=MultiIndex.from_arrays(
|
| 143 |
+
[[1, 2], idx[::2]],
|
| 144 |
+
names=["first", "second"],
|
| 145 |
+
),
|
| 146 |
+
)
|
| 147 |
+
mi = MultiIndex.from_arrays(
|
| 148 |
+
[[1, 2, 2], idx],
|
| 149 |
+
names=["first", "second"],
|
| 150 |
+
)
|
| 151 |
+
assert mi.levels[1].dtype == object
|
| 152 |
+
|
| 153 |
+
expected = DataFrame(
|
| 154 |
+
[["a", "a"], ["b", np.nan], [np.nan, "b"]],
|
| 155 |
+
index=mi,
|
| 156 |
+
)
|
| 157 |
+
result = concat([s, s2], axis=1)
|
| 158 |
+
tm.assert_frame_equal(result, expected)
|
| 159 |
+
|
| 160 |
+
def test_concat_NaT_series(self):
|
| 161 |
+
# GH 11693
|
| 162 |
+
# test for merging NaT series with datetime series.
|
| 163 |
+
x = Series(
|
| 164 |
+
date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern")
|
| 165 |
+
)
|
| 166 |
+
y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
|
| 167 |
+
expected = Series([x[0], x[1], pd.NaT, pd.NaT])
|
| 168 |
+
|
| 169 |
+
result = concat([x, y], ignore_index=True)
|
| 170 |
+
tm.assert_series_equal(result, expected)
|
| 171 |
+
|
| 172 |
+
# all NaT with tz
|
| 173 |
+
expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]")
|
| 174 |
+
result = concat([y, y], ignore_index=True)
|
| 175 |
+
tm.assert_series_equal(result, expected)
|
| 176 |
+
|
| 177 |
+
def test_concat_NaT_series2(self):
|
| 178 |
+
# without tz
|
| 179 |
+
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h"))
|
| 180 |
+
y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h"))
|
| 181 |
+
y[:] = pd.NaT
|
| 182 |
+
expected = Series([x[0], x[1], pd.NaT, pd.NaT])
|
| 183 |
+
result = concat([x, y], ignore_index=True)
|
| 184 |
+
tm.assert_series_equal(result, expected)
|
| 185 |
+
|
| 186 |
+
# all NaT without tz
|
| 187 |
+
x[:] = pd.NaT
|
| 188 |
+
expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
|
| 189 |
+
result = concat([x, y], ignore_index=True)
|
| 190 |
+
tm.assert_series_equal(result, expected)
|
| 191 |
+
|
| 192 |
+
@pytest.mark.parametrize("tz", [None, "UTC"])
|
| 193 |
+
def test_concat_NaT_dataframes(self, tz):
|
| 194 |
+
# GH 12396
|
| 195 |
+
|
| 196 |
+
dti = DatetimeIndex([pd.NaT, pd.NaT], tz=tz)
|
| 197 |
+
first = DataFrame({0: dti})
|
| 198 |
+
second = DataFrame(
|
| 199 |
+
[[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]],
|
| 200 |
+
index=[2, 3],
|
| 201 |
+
)
|
| 202 |
+
expected = DataFrame(
|
| 203 |
+
[
|
| 204 |
+
pd.NaT,
|
| 205 |
+
pd.NaT,
|
| 206 |
+
Timestamp("2015/01/01", tz=tz),
|
| 207 |
+
Timestamp("2016/01/01", tz=tz),
|
| 208 |
+
]
|
| 209 |
+
)
|
| 210 |
+
|
| 211 |
+
result = concat([first, second], axis=0)
|
| 212 |
+
tm.assert_frame_equal(result, expected)
|
| 213 |
+
|
| 214 |
+
@pytest.mark.parametrize("tz1", [None, "UTC"])
|
| 215 |
+
@pytest.mark.parametrize("tz2", [None, "UTC"])
|
| 216 |
+
@pytest.mark.parametrize("item", [pd.NaT, Timestamp("20150101")])
|
| 217 |
+
def test_concat_NaT_dataframes_all_NaT_axis_0(
|
| 218 |
+
self, tz1, tz2, item, using_array_manager
|
| 219 |
+
):
|
| 220 |
+
# GH 12396
|
| 221 |
+
|
| 222 |
+
# tz-naive
|
| 223 |
+
first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1))
|
| 224 |
+
second = DataFrame([item]).apply(lambda x: x.dt.tz_localize(tz2))
|
| 225 |
+
|
| 226 |
+
result = concat([first, second], axis=0)
|
| 227 |
+
expected = DataFrame(Series([pd.NaT, pd.NaT, item], index=[0, 1, 0]))
|
| 228 |
+
expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
|
| 229 |
+
if tz1 != tz2:
|
| 230 |
+
expected = expected.astype(object)
|
| 231 |
+
if item is pd.NaT and not using_array_manager:
|
| 232 |
+
# GH#18463
|
| 233 |
+
# TODO: setting nan here is to keep the test passing as we
|
| 234 |
+
# make assert_frame_equal stricter, but is nan really the
|
| 235 |
+
# ideal behavior here?
|
| 236 |
+
if tz1 is not None:
|
| 237 |
+
expected.iloc[-1, 0] = np.nan
|
| 238 |
+
else:
|
| 239 |
+
expected.iloc[:-1, 0] = np.nan
|
| 240 |
+
|
| 241 |
+
tm.assert_frame_equal(result, expected)
|
| 242 |
+
|
| 243 |
+
@pytest.mark.parametrize("tz1", [None, "UTC"])
|
| 244 |
+
@pytest.mark.parametrize("tz2", [None, "UTC"])
|
| 245 |
+
def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
|
| 246 |
+
# GH 12396
|
| 247 |
+
|
| 248 |
+
first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
|
| 249 |
+
second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
|
| 250 |
+
expected = DataFrame(
|
| 251 |
+
{
|
| 252 |
+
0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
|
| 253 |
+
1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
|
| 254 |
+
}
|
| 255 |
+
)
|
| 256 |
+
result = concat([first, second], axis=1)
|
| 257 |
+
tm.assert_frame_equal(result, expected)
|
| 258 |
+
|
| 259 |
+
@pytest.mark.parametrize("tz1", [None, "UTC"])
|
| 260 |
+
@pytest.mark.parametrize("tz2", [None, "UTC"])
|
| 261 |
+
def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
|
| 262 |
+
# GH 12396
|
| 263 |
+
|
| 264 |
+
# tz-naive
|
| 265 |
+
first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
|
| 266 |
+
second = DataFrame(
|
| 267 |
+
[
|
| 268 |
+
[Timestamp("2015/01/01", tz=tz2)],
|
| 269 |
+
[Timestamp("2016/01/01", tz=tz2)],
|
| 270 |
+
],
|
| 271 |
+
index=[2, 3],
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
expected = DataFrame(
|
| 275 |
+
[
|
| 276 |
+
pd.NaT,
|
| 277 |
+
pd.NaT,
|
| 278 |
+
Timestamp("2015/01/01", tz=tz2),
|
| 279 |
+
Timestamp("2016/01/01", tz=tz2),
|
| 280 |
+
]
|
| 281 |
+
)
|
| 282 |
+
if tz1 != tz2:
|
| 283 |
+
expected = expected.astype(object)
|
| 284 |
+
|
| 285 |
+
result = concat([first, second])
|
| 286 |
+
tm.assert_frame_equal(result, expected)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class TestTimezoneConcat:
|
| 290 |
+
def test_concat_tz_series(self):
|
| 291 |
+
# gh-11755: tz and no tz
|
| 292 |
+
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
|
| 293 |
+
y = Series(date_range("2012-01-01", "2012-01-02"))
|
| 294 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
|
| 295 |
+
result = concat([x, y], ignore_index=True)
|
| 296 |
+
tm.assert_series_equal(result, expected)
|
| 297 |
+
|
| 298 |
+
def test_concat_tz_series2(self):
|
| 299 |
+
# gh-11887: concat tz and object
|
| 300 |
+
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
|
| 301 |
+
y = Series(["a", "b"])
|
| 302 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
|
| 303 |
+
result = concat([x, y], ignore_index=True)
|
| 304 |
+
tm.assert_series_equal(result, expected)
|
| 305 |
+
|
| 306 |
+
def test_concat_tz_series3(self, unit, unit2):
|
| 307 |
+
# see gh-12217 and gh-12306
|
| 308 |
+
# Concatenating two UTC times
|
| 309 |
+
first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
|
| 310 |
+
first[0] = first[0].dt.tz_localize("UTC")
|
| 311 |
+
|
| 312 |
+
second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]")
|
| 313 |
+
second[0] = second[0].dt.tz_localize("UTC")
|
| 314 |
+
|
| 315 |
+
result = concat([first, second])
|
| 316 |
+
exp_unit = tm.get_finest_unit(unit, unit2)
|
| 317 |
+
assert result[0].dtype == f"datetime64[{exp_unit}, UTC]"
|
| 318 |
+
|
| 319 |
+
def test_concat_tz_series4(self, unit, unit2):
|
| 320 |
+
# Concatenating two London times
|
| 321 |
+
first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
|
| 322 |
+
first[0] = first[0].dt.tz_localize("Europe/London")
|
| 323 |
+
|
| 324 |
+
second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]")
|
| 325 |
+
second[0] = second[0].dt.tz_localize("Europe/London")
|
| 326 |
+
|
| 327 |
+
result = concat([first, second])
|
| 328 |
+
exp_unit = tm.get_finest_unit(unit, unit2)
|
| 329 |
+
assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
|
| 330 |
+
|
| 331 |
+
def test_concat_tz_series5(self, unit, unit2):
|
| 332 |
+
# Concatenating 2+1 London times
|
| 333 |
+
first = DataFrame(
|
| 334 |
+
[[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]], dtype=f"M8[{unit}]"
|
| 335 |
+
)
|
| 336 |
+
first[0] = first[0].dt.tz_localize("Europe/London")
|
| 337 |
+
|
| 338 |
+
second = DataFrame([[datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]")
|
| 339 |
+
second[0] = second[0].dt.tz_localize("Europe/London")
|
| 340 |
+
|
| 341 |
+
result = concat([first, second])
|
| 342 |
+
exp_unit = tm.get_finest_unit(unit, unit2)
|
| 343 |
+
assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
|
| 344 |
+
|
| 345 |
+
def test_concat_tz_series6(self, unit, unit2):
|
| 346 |
+
# Concatenating 1+2 London times
|
| 347 |
+
first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
|
| 348 |
+
first[0] = first[0].dt.tz_localize("Europe/London")
|
| 349 |
+
|
| 350 |
+
second = DataFrame(
|
| 351 |
+
[[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]"
|
| 352 |
+
)
|
| 353 |
+
second[0] = second[0].dt.tz_localize("Europe/London")
|
| 354 |
+
|
| 355 |
+
result = concat([first, second])
|
| 356 |
+
exp_unit = tm.get_finest_unit(unit, unit2)
|
| 357 |
+
assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
|
| 358 |
+
|
| 359 |
+
def test_concat_tz_series_tzlocal(self):
|
| 360 |
+
# see gh-13583
|
| 361 |
+
x = [
|
| 362 |
+
Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()),
|
| 363 |
+
Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()),
|
| 364 |
+
]
|
| 365 |
+
y = [
|
| 366 |
+
Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()),
|
| 367 |
+
Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
|
| 368 |
+
]
|
| 369 |
+
|
| 370 |
+
result = concat([Series(x), Series(y)], ignore_index=True)
|
| 371 |
+
tm.assert_series_equal(result, Series(x + y))
|
| 372 |
+
assert result.dtype == "datetime64[ns, tzlocal()]"
|
| 373 |
+
|
| 374 |
+
def test_concat_tz_series_with_datetimelike(self):
|
| 375 |
+
# see gh-12620: tz and timedelta
|
| 376 |
+
x = [
|
| 377 |
+
Timestamp("2011-01-01", tz="US/Eastern"),
|
| 378 |
+
Timestamp("2011-02-01", tz="US/Eastern"),
|
| 379 |
+
]
|
| 380 |
+
y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
|
| 381 |
+
result = concat([Series(x), Series(y)], ignore_index=True)
|
| 382 |
+
tm.assert_series_equal(result, Series(x + y, dtype="object"))
|
| 383 |
+
|
| 384 |
+
# tz and period
|
| 385 |
+
y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
|
| 386 |
+
result = concat([Series(x), Series(y)], ignore_index=True)
|
| 387 |
+
tm.assert_series_equal(result, Series(x + y, dtype="object"))
|
| 388 |
+
|
| 389 |
+
def test_concat_tz_frame(self):
|
| 390 |
+
df2 = DataFrame(
|
| 391 |
+
{
|
| 392 |
+
"A": Timestamp("20130102", tz="US/Eastern"),
|
| 393 |
+
"B": Timestamp("20130603", tz="CET"),
|
| 394 |
+
},
|
| 395 |
+
index=range(5),
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
# concat
|
| 399 |
+
df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
|
| 400 |
+
tm.assert_frame_equal(df2, df3)
|
| 401 |
+
|
| 402 |
+
def test_concat_multiple_tzs(self):
|
| 403 |
+
# GH#12467
|
| 404 |
+
# combining datetime tz-aware and naive DataFrames
|
| 405 |
+
ts1 = Timestamp("2015-01-01", tz=None)
|
| 406 |
+
ts2 = Timestamp("2015-01-01", tz="UTC")
|
| 407 |
+
ts3 = Timestamp("2015-01-01", tz="EST")
|
| 408 |
+
|
| 409 |
+
df1 = DataFrame({"time": [ts1]})
|
| 410 |
+
df2 = DataFrame({"time": [ts2]})
|
| 411 |
+
df3 = DataFrame({"time": [ts3]})
|
| 412 |
+
|
| 413 |
+
results = concat([df1, df2]).reset_index(drop=True)
|
| 414 |
+
expected = DataFrame({"time": [ts1, ts2]}, dtype=object)
|
| 415 |
+
tm.assert_frame_equal(results, expected)
|
| 416 |
+
|
| 417 |
+
results = concat([df1, df3]).reset_index(drop=True)
|
| 418 |
+
expected = DataFrame({"time": [ts1, ts3]}, dtype=object)
|
| 419 |
+
tm.assert_frame_equal(results, expected)
|
| 420 |
+
|
| 421 |
+
results = concat([df2, df3]).reset_index(drop=True)
|
| 422 |
+
expected = DataFrame({"time": [ts2, ts3]})
|
| 423 |
+
tm.assert_frame_equal(results, expected)
|
| 424 |
+
|
| 425 |
+
def test_concat_multiindex_with_tz(self):
|
| 426 |
+
# GH 6606
|
| 427 |
+
df = DataFrame(
|
| 428 |
+
{
|
| 429 |
+
"dt": DatetimeIndex(
|
| 430 |
+
[
|
| 431 |
+
datetime(2014, 1, 1),
|
| 432 |
+
datetime(2014, 1, 2),
|
| 433 |
+
datetime(2014, 1, 3),
|
| 434 |
+
],
|
| 435 |
+
dtype="M8[ns, US/Pacific]",
|
| 436 |
+
),
|
| 437 |
+
"b": ["A", "B", "C"],
|
| 438 |
+
"c": [1, 2, 3],
|
| 439 |
+
"d": [4, 5, 6],
|
| 440 |
+
}
|
| 441 |
+
)
|
| 442 |
+
df = df.set_index(["dt", "b"])
|
| 443 |
+
|
| 444 |
+
exp_idx1 = DatetimeIndex(
|
| 445 |
+
["2014-01-01", "2014-01-02", "2014-01-03"] * 2,
|
| 446 |
+
dtype="M8[ns, US/Pacific]",
|
| 447 |
+
name="dt",
|
| 448 |
+
)
|
| 449 |
+
exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
|
| 450 |
+
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
|
| 451 |
+
expected = DataFrame(
|
| 452 |
+
{"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"]
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
result = concat([df, df])
|
| 456 |
+
tm.assert_frame_equal(result, expected)
|
| 457 |
+
|
| 458 |
+
def test_concat_tz_not_aligned(self):
|
| 459 |
+
# GH#22796
|
| 460 |
+
ts = pd.to_datetime([1, 2]).tz_localize("UTC")
|
| 461 |
+
a = DataFrame({"A": ts})
|
| 462 |
+
b = DataFrame({"A": ts, "B": ts})
|
| 463 |
+
result = concat([a, b], sort=True, ignore_index=True)
|
| 464 |
+
expected = DataFrame(
|
| 465 |
+
{"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
|
| 466 |
+
)
|
| 467 |
+
tm.assert_frame_equal(result, expected)
|
| 468 |
+
|
| 469 |
+
@pytest.mark.parametrize(
|
| 470 |
+
"t1",
|
| 471 |
+
[
|
| 472 |
+
"2015-01-01",
|
| 473 |
+
pytest.param(
|
| 474 |
+
pd.NaT,
|
| 475 |
+
marks=pytest.mark.xfail(
|
| 476 |
+
reason="GH23037 incorrect dtype when concatenating"
|
| 477 |
+
),
|
| 478 |
+
),
|
| 479 |
+
],
|
| 480 |
+
)
|
| 481 |
+
def test_concat_tz_NaT(self, t1):
|
| 482 |
+
# GH#22796
|
| 483 |
+
# Concatenating tz-aware multicolumn DataFrames
|
| 484 |
+
ts1 = Timestamp(t1, tz="UTC")
|
| 485 |
+
ts2 = Timestamp("2015-01-01", tz="UTC")
|
| 486 |
+
ts3 = Timestamp("2015-01-01", tz="UTC")
|
| 487 |
+
|
| 488 |
+
df1 = DataFrame([[ts1, ts2]])
|
| 489 |
+
df2 = DataFrame([[ts3]])
|
| 490 |
+
|
| 491 |
+
result = concat([df1, df2])
|
| 492 |
+
expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
|
| 493 |
+
|
| 494 |
+
tm.assert_frame_equal(result, expected)
|
| 495 |
+
|
| 496 |
+
def test_concat_tz_with_empty(self):
|
| 497 |
+
# GH 9188
|
| 498 |
+
result = concat(
|
| 499 |
+
[DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()]
|
| 500 |
+
)
|
| 501 |
+
expected = DataFrame(date_range("2000", periods=1, tz="UTC"))
|
| 502 |
+
tm.assert_frame_equal(result, expected)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
class TestPeriodConcat:
|
| 506 |
+
def test_concat_period_series(self):
|
| 507 |
+
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
|
| 508 |
+
y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D"))
|
| 509 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]")
|
| 510 |
+
result = concat([x, y], ignore_index=True)
|
| 511 |
+
tm.assert_series_equal(result, expected)
|
| 512 |
+
|
| 513 |
+
def test_concat_period_multiple_freq_series(self):
|
| 514 |
+
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
|
| 515 |
+
y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M"))
|
| 516 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
|
| 517 |
+
result = concat([x, y], ignore_index=True)
|
| 518 |
+
tm.assert_series_equal(result, expected)
|
| 519 |
+
assert result.dtype == "object"
|
| 520 |
+
|
| 521 |
+
def test_concat_period_other_series(self):
|
| 522 |
+
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
|
| 523 |
+
y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M"))
|
| 524 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
|
| 525 |
+
result = concat([x, y], ignore_index=True)
|
| 526 |
+
tm.assert_series_equal(result, expected)
|
| 527 |
+
assert result.dtype == "object"
|
| 528 |
+
|
| 529 |
+
def test_concat_period_other_series2(self):
|
| 530 |
+
# non-period
|
| 531 |
+
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
|
| 532 |
+
y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"]))
|
| 533 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
|
| 534 |
+
result = concat([x, y], ignore_index=True)
|
| 535 |
+
tm.assert_series_equal(result, expected)
|
| 536 |
+
assert result.dtype == "object"
|
| 537 |
+
|
| 538 |
+
def test_concat_period_other_series3(self):
|
| 539 |
+
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
|
| 540 |
+
y = Series(["A", "B"])
|
| 541 |
+
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
|
| 542 |
+
result = concat([x, y], ignore_index=True)
|
| 543 |
+
tm.assert_series_equal(result, expected)
|
| 544 |
+
assert result.dtype == "object"
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def test_concat_timedelta64_block():
|
| 548 |
+
rng = to_timedelta(np.arange(10), unit="s")
|
| 549 |
+
|
| 550 |
+
df = DataFrame({"time": rng})
|
| 551 |
+
|
| 552 |
+
result = concat([df, df])
|
| 553 |
+
tm.assert_frame_equal(result.iloc[:10], df)
|
| 554 |
+
tm.assert_frame_equal(result.iloc[10:], df)
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
def test_concat_multiindex_datetime_nat():
|
| 558 |
+
# GH#44900
|
| 559 |
+
left = DataFrame({"a": 1}, index=MultiIndex.from_tuples([(1, pd.NaT)]))
|
| 560 |
+
right = DataFrame(
|
| 561 |
+
{"b": 2}, index=MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
|
| 562 |
+
)
|
| 563 |
+
result = concat([left, right], axis="columns")
|
| 564 |
+
expected = DataFrame(
|
| 565 |
+
{"a": [1.0, np.nan], "b": 2}, MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
|
| 566 |
+
)
|
| 567 |
+
tm.assert_frame_equal(result, expected)
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def test_concat_float_datetime64(using_array_manager):
|
| 571 |
+
# GH#32934
|
| 572 |
+
df_time = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")})
|
| 573 |
+
df_float = DataFrame({"A": pd.array([1.0], dtype="float64")})
|
| 574 |
+
|
| 575 |
+
expected = DataFrame(
|
| 576 |
+
{
|
| 577 |
+
"A": [
|
| 578 |
+
pd.array(["2000"], dtype="datetime64[ns]")[0],
|
| 579 |
+
pd.array([1.0], dtype="float64")[0],
|
| 580 |
+
]
|
| 581 |
+
},
|
| 582 |
+
index=[0, 0],
|
| 583 |
+
)
|
| 584 |
+
result = concat([df_time, df_float])
|
| 585 |
+
tm.assert_frame_equal(result, expected)
|
| 586 |
+
|
| 587 |
+
expected = DataFrame({"A": pd.array([], dtype="object")})
|
| 588 |
+
result = concat([df_time.iloc[:0], df_float.iloc[:0]])
|
| 589 |
+
tm.assert_frame_equal(result, expected)
|
| 590 |
+
|
| 591 |
+
expected = DataFrame({"A": pd.array([1.0], dtype="object")})
|
| 592 |
+
result = concat([df_time.iloc[:0], df_float])
|
| 593 |
+
tm.assert_frame_equal(result, expected)
|
| 594 |
+
|
| 595 |
+
if not using_array_manager:
|
| 596 |
+
expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")})
|
| 597 |
+
msg = "The behavior of DataFrame concatenation with empty or all-NA entries"
|
| 598 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 599 |
+
result = concat([df_time, df_float.iloc[:0]])
|
| 600 |
+
tm.assert_frame_equal(result, expected)
|
| 601 |
+
else:
|
| 602 |
+
expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}).astype(
|
| 603 |
+
{"A": "object"}
|
| 604 |
+
)
|
| 605 |
+
result = concat([df_time, df_float.iloc[:0]])
|
| 606 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py
ADDED
|
@@ -0,0 +1,472 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from copy import deepcopy
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas.errors import PerformanceWarning
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
Index,
|
| 12 |
+
MultiIndex,
|
| 13 |
+
Series,
|
| 14 |
+
concat,
|
| 15 |
+
)
|
| 16 |
+
import pandas._testing as tm
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestIndexConcat:
|
| 20 |
+
def test_concat_ignore_index(self, sort):
|
| 21 |
+
frame1 = DataFrame(
|
| 22 |
+
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
|
| 23 |
+
)
|
| 24 |
+
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
|
| 25 |
+
frame1.index = Index(["x", "y", "z"])
|
| 26 |
+
frame2.index = Index(["x", "y", "q"])
|
| 27 |
+
|
| 28 |
+
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
|
| 29 |
+
|
| 30 |
+
nan = np.nan
|
| 31 |
+
expected = DataFrame(
|
| 32 |
+
[
|
| 33 |
+
[nan, nan, nan, 4.3],
|
| 34 |
+
["a", 1, 4.5, 5.2],
|
| 35 |
+
["b", 2, 3.2, 2.2],
|
| 36 |
+
["c", 3, 1.2, nan],
|
| 37 |
+
],
|
| 38 |
+
index=Index(["q", "x", "y", "z"]),
|
| 39 |
+
)
|
| 40 |
+
if not sort:
|
| 41 |
+
expected = expected.loc[["x", "y", "z", "q"]]
|
| 42 |
+
|
| 43 |
+
tm.assert_frame_equal(v1, expected)
|
| 44 |
+
|
| 45 |
+
@pytest.mark.parametrize(
|
| 46 |
+
"name_in1,name_in2,name_in3,name_out",
|
| 47 |
+
[
|
| 48 |
+
("idx", "idx", "idx", "idx"),
|
| 49 |
+
("idx", "idx", None, None),
|
| 50 |
+
("idx", None, None, None),
|
| 51 |
+
("idx1", "idx2", None, None),
|
| 52 |
+
("idx1", "idx1", "idx2", None),
|
| 53 |
+
("idx1", "idx2", "idx3", None),
|
| 54 |
+
(None, None, None, None),
|
| 55 |
+
],
|
| 56 |
+
)
|
| 57 |
+
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
|
| 58 |
+
# GH13475
|
| 59 |
+
indices = [
|
| 60 |
+
Index(["a", "b", "c"], name=name_in1),
|
| 61 |
+
Index(["b", "c", "d"], name=name_in2),
|
| 62 |
+
Index(["c", "d", "e"], name=name_in3),
|
| 63 |
+
]
|
| 64 |
+
frames = [
|
| 65 |
+
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
|
| 66 |
+
]
|
| 67 |
+
result = concat(frames, axis=1)
|
| 68 |
+
|
| 69 |
+
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
|
| 70 |
+
expected = DataFrame(
|
| 71 |
+
{
|
| 72 |
+
"x": [0, 1, 2, np.nan, np.nan],
|
| 73 |
+
"y": [np.nan, 0, 1, 2, np.nan],
|
| 74 |
+
"z": [np.nan, np.nan, 0, 1, 2],
|
| 75 |
+
},
|
| 76 |
+
index=exp_ind,
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
tm.assert_frame_equal(result, expected)
|
| 80 |
+
|
| 81 |
+
def test_concat_rename_index(self):
|
| 82 |
+
a = DataFrame(
|
| 83 |
+
np.random.default_rng(2).random((3, 3)),
|
| 84 |
+
columns=list("ABC"),
|
| 85 |
+
index=Index(list("abc"), name="index_a"),
|
| 86 |
+
)
|
| 87 |
+
b = DataFrame(
|
| 88 |
+
np.random.default_rng(2).random((3, 3)),
|
| 89 |
+
columns=list("ABC"),
|
| 90 |
+
index=Index(list("abc"), name="index_b"),
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
|
| 94 |
+
|
| 95 |
+
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
|
| 96 |
+
names = list(exp.index.names)
|
| 97 |
+
names[1] = "lvl1"
|
| 98 |
+
exp.index.set_names(names, inplace=True)
|
| 99 |
+
|
| 100 |
+
tm.assert_frame_equal(result, exp)
|
| 101 |
+
assert result.index.names == exp.index.names
|
| 102 |
+
|
| 103 |
+
def test_concat_copy_index_series(self, axis, using_copy_on_write):
|
| 104 |
+
# GH 29879
|
| 105 |
+
ser = Series([1, 2])
|
| 106 |
+
comb = concat([ser, ser], axis=axis, copy=True)
|
| 107 |
+
if not using_copy_on_write or axis in [0, "index"]:
|
| 108 |
+
assert comb.index is not ser.index
|
| 109 |
+
else:
|
| 110 |
+
assert comb.index is ser.index
|
| 111 |
+
|
| 112 |
+
def test_concat_copy_index_frame(self, axis, using_copy_on_write):
|
| 113 |
+
# GH 29879
|
| 114 |
+
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
|
| 115 |
+
comb = concat([df, df], axis=axis, copy=True)
|
| 116 |
+
if not using_copy_on_write:
|
| 117 |
+
assert not comb.index.is_(df.index)
|
| 118 |
+
assert not comb.columns.is_(df.columns)
|
| 119 |
+
elif axis in [0, "index"]:
|
| 120 |
+
assert not comb.index.is_(df.index)
|
| 121 |
+
assert comb.columns.is_(df.columns)
|
| 122 |
+
elif axis in [1, "columns"]:
|
| 123 |
+
assert comb.index.is_(df.index)
|
| 124 |
+
assert not comb.columns.is_(df.columns)
|
| 125 |
+
|
| 126 |
+
def test_default_index(self):
|
| 127 |
+
# is_series and ignore_index
|
| 128 |
+
s1 = Series([1, 2, 3], name="x")
|
| 129 |
+
s2 = Series([4, 5, 6], name="y")
|
| 130 |
+
res = concat([s1, s2], axis=1, ignore_index=True)
|
| 131 |
+
assert isinstance(res.columns, pd.RangeIndex)
|
| 132 |
+
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
|
| 133 |
+
# use check_index_type=True to check the result have
|
| 134 |
+
# RangeIndex (default index)
|
| 135 |
+
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
|
| 136 |
+
|
| 137 |
+
# is_series and all inputs have no names
|
| 138 |
+
s1 = Series([1, 2, 3])
|
| 139 |
+
s2 = Series([4, 5, 6])
|
| 140 |
+
res = concat([s1, s2], axis=1, ignore_index=False)
|
| 141 |
+
assert isinstance(res.columns, pd.RangeIndex)
|
| 142 |
+
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
|
| 143 |
+
exp.columns = pd.RangeIndex(2)
|
| 144 |
+
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
|
| 145 |
+
|
| 146 |
+
# is_dataframe and ignore_index
|
| 147 |
+
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
|
| 148 |
+
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
|
| 149 |
+
|
| 150 |
+
res = concat([df1, df2], axis=0, ignore_index=True)
|
| 151 |
+
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
|
| 152 |
+
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
|
| 153 |
+
|
| 154 |
+
res = concat([df1, df2], axis=1, ignore_index=True)
|
| 155 |
+
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
|
| 156 |
+
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
|
| 157 |
+
|
| 158 |
+
def test_dups_index(self):
|
| 159 |
+
# GH 4771
|
| 160 |
+
|
| 161 |
+
# single dtypes
|
| 162 |
+
df = DataFrame(
|
| 163 |
+
np.random.default_rng(2).integers(0, 10, size=40).reshape(10, 4),
|
| 164 |
+
columns=["A", "A", "C", "C"],
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
result = concat([df, df], axis=1)
|
| 168 |
+
tm.assert_frame_equal(result.iloc[:, :4], df)
|
| 169 |
+
tm.assert_frame_equal(result.iloc[:, 4:], df)
|
| 170 |
+
|
| 171 |
+
result = concat([df, df], axis=0)
|
| 172 |
+
tm.assert_frame_equal(result.iloc[:10], df)
|
| 173 |
+
tm.assert_frame_equal(result.iloc[10:], df)
|
| 174 |
+
|
| 175 |
+
# multi dtypes
|
| 176 |
+
df = concat(
|
| 177 |
+
[
|
| 178 |
+
DataFrame(
|
| 179 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
| 180 |
+
columns=["A", "A", "B", "B"],
|
| 181 |
+
),
|
| 182 |
+
DataFrame(
|
| 183 |
+
np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2),
|
| 184 |
+
columns=["A", "C"],
|
| 185 |
+
),
|
| 186 |
+
],
|
| 187 |
+
axis=1,
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
result = concat([df, df], axis=1)
|
| 191 |
+
tm.assert_frame_equal(result.iloc[:, :6], df)
|
| 192 |
+
tm.assert_frame_equal(result.iloc[:, 6:], df)
|
| 193 |
+
|
| 194 |
+
result = concat([df, df], axis=0)
|
| 195 |
+
tm.assert_frame_equal(result.iloc[:10], df)
|
| 196 |
+
tm.assert_frame_equal(result.iloc[10:], df)
|
| 197 |
+
|
| 198 |
+
# append
|
| 199 |
+
result = df.iloc[0:8, :]._append(df.iloc[8:])
|
| 200 |
+
tm.assert_frame_equal(result, df)
|
| 201 |
+
|
| 202 |
+
result = df.iloc[0:8, :]._append(df.iloc[8:9])._append(df.iloc[9:10])
|
| 203 |
+
tm.assert_frame_equal(result, df)
|
| 204 |
+
|
| 205 |
+
expected = concat([df, df], axis=0)
|
| 206 |
+
result = df._append(df)
|
| 207 |
+
tm.assert_frame_equal(result, expected)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class TestMultiIndexConcat:
|
| 211 |
+
def test_concat_multiindex_with_keys(self, multiindex_dataframe_random_data):
|
| 212 |
+
frame = multiindex_dataframe_random_data
|
| 213 |
+
index = frame.index
|
| 214 |
+
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
|
| 215 |
+
|
| 216 |
+
assert result.index.names == ("iteration",) + index.names
|
| 217 |
+
tm.assert_frame_equal(result.loc[0], frame)
|
| 218 |
+
tm.assert_frame_equal(result.loc[1], frame)
|
| 219 |
+
assert result.index.nlevels == 3
|
| 220 |
+
|
| 221 |
+
def test_concat_multiindex_with_none_in_index_names(self):
|
| 222 |
+
# GH 15787
|
| 223 |
+
index = MultiIndex.from_product([[1], range(5)], names=["level1", None])
|
| 224 |
+
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
|
| 225 |
+
|
| 226 |
+
result = concat([df, df], keys=[1, 2], names=["level2"])
|
| 227 |
+
index = MultiIndex.from_product(
|
| 228 |
+
[[1, 2], [1], range(5)], names=["level2", "level1", None]
|
| 229 |
+
)
|
| 230 |
+
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
|
| 231 |
+
tm.assert_frame_equal(result, expected)
|
| 232 |
+
|
| 233 |
+
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
|
| 234 |
+
level2 = [1] * 5 + [2] * 2
|
| 235 |
+
level1 = [1] * 7
|
| 236 |
+
no_name = list(range(5)) + list(range(2))
|
| 237 |
+
tuples = list(zip(level2, level1, no_name))
|
| 238 |
+
index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
|
| 239 |
+
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
|
| 240 |
+
tm.assert_frame_equal(result, expected)
|
| 241 |
+
|
| 242 |
+
def test_concat_multiindex_rangeindex(self):
|
| 243 |
+
# GH13542
|
| 244 |
+
# when multi-index levels are RangeIndex objects
|
| 245 |
+
# there is a bug in concat with objects of len 1
|
| 246 |
+
|
| 247 |
+
df = DataFrame(np.random.default_rng(2).standard_normal((9, 2)))
|
| 248 |
+
df.index = MultiIndex(
|
| 249 |
+
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
|
| 250 |
+
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
|
| 254 |
+
exp = df.iloc[[2, 3, 4, 5], :]
|
| 255 |
+
tm.assert_frame_equal(res, exp)
|
| 256 |
+
|
| 257 |
+
def test_concat_multiindex_dfs_with_deepcopy(self):
|
| 258 |
+
# GH 9967
|
| 259 |
+
example_multiindex1 = MultiIndex.from_product([["a"], ["b"]])
|
| 260 |
+
example_dataframe1 = DataFrame([0], index=example_multiindex1)
|
| 261 |
+
|
| 262 |
+
example_multiindex2 = MultiIndex.from_product([["a"], ["c"]])
|
| 263 |
+
example_dataframe2 = DataFrame([1], index=example_multiindex2)
|
| 264 |
+
|
| 265 |
+
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
|
| 266 |
+
expected_index = MultiIndex(
|
| 267 |
+
levels=[["s1", "s2"], ["a"], ["b", "c"]],
|
| 268 |
+
codes=[[0, 1], [0, 0], [0, 1]],
|
| 269 |
+
names=["testname", None, None],
|
| 270 |
+
)
|
| 271 |
+
expected = DataFrame([[0], [1]], index=expected_index)
|
| 272 |
+
result_copy = concat(deepcopy(example_dict), names=["testname"])
|
| 273 |
+
tm.assert_frame_equal(result_copy, expected)
|
| 274 |
+
result_no_copy = concat(example_dict, names=["testname"])
|
| 275 |
+
tm.assert_frame_equal(result_no_copy, expected)
|
| 276 |
+
|
| 277 |
+
@pytest.mark.parametrize(
|
| 278 |
+
"mi1_list",
|
| 279 |
+
[
|
| 280 |
+
[["a"], range(2)],
|
| 281 |
+
[["b"], np.arange(2.0, 4.0)],
|
| 282 |
+
[["c"], ["A", "B"]],
|
| 283 |
+
[["d"], pd.date_range(start="2017", end="2018", periods=2)],
|
| 284 |
+
],
|
| 285 |
+
)
|
| 286 |
+
@pytest.mark.parametrize(
|
| 287 |
+
"mi2_list",
|
| 288 |
+
[
|
| 289 |
+
[["a"], range(2)],
|
| 290 |
+
[["b"], np.arange(2.0, 4.0)],
|
| 291 |
+
[["c"], ["A", "B"]],
|
| 292 |
+
[["d"], pd.date_range(start="2017", end="2018", periods=2)],
|
| 293 |
+
],
|
| 294 |
+
)
|
| 295 |
+
def test_concat_with_various_multiindex_dtypes(
|
| 296 |
+
self, mi1_list: list, mi2_list: list
|
| 297 |
+
):
|
| 298 |
+
# GitHub #23478
|
| 299 |
+
mi1 = MultiIndex.from_product(mi1_list)
|
| 300 |
+
mi2 = MultiIndex.from_product(mi2_list)
|
| 301 |
+
|
| 302 |
+
df1 = DataFrame(np.zeros((1, len(mi1))), columns=mi1)
|
| 303 |
+
df2 = DataFrame(np.zeros((1, len(mi2))), columns=mi2)
|
| 304 |
+
|
| 305 |
+
if mi1_list[0] == mi2_list[0]:
|
| 306 |
+
expected_mi = MultiIndex(
|
| 307 |
+
levels=[mi1_list[0], list(mi1_list[1])],
|
| 308 |
+
codes=[[0, 0, 0, 0], [0, 1, 0, 1]],
|
| 309 |
+
)
|
| 310 |
+
else:
|
| 311 |
+
expected_mi = MultiIndex(
|
| 312 |
+
levels=[
|
| 313 |
+
mi1_list[0] + mi2_list[0],
|
| 314 |
+
list(mi1_list[1]) + list(mi2_list[1]),
|
| 315 |
+
],
|
| 316 |
+
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
|
| 317 |
+
)
|
| 318 |
+
|
| 319 |
+
expected_df = DataFrame(np.zeros((1, len(expected_mi))), columns=expected_mi)
|
| 320 |
+
|
| 321 |
+
with tm.assert_produces_warning(None):
|
| 322 |
+
result_df = concat((df1, df2), axis=1)
|
| 323 |
+
|
| 324 |
+
tm.assert_frame_equal(expected_df, result_df)
|
| 325 |
+
|
| 326 |
+
def test_concat_multiindex_(self):
|
| 327 |
+
# GitHub #44786
|
| 328 |
+
df = DataFrame({"col": ["a", "b", "c"]}, index=["1", "2", "2"])
|
| 329 |
+
df = concat([df], keys=["X"])
|
| 330 |
+
|
| 331 |
+
iterables = [["X"], ["1", "2", "2"]]
|
| 332 |
+
result_index = df.index
|
| 333 |
+
expected_index = MultiIndex.from_product(iterables)
|
| 334 |
+
|
| 335 |
+
tm.assert_index_equal(result_index, expected_index)
|
| 336 |
+
|
| 337 |
+
result_df = df
|
| 338 |
+
expected_df = DataFrame(
|
| 339 |
+
{"col": ["a", "b", "c"]}, index=MultiIndex.from_product(iterables)
|
| 340 |
+
)
|
| 341 |
+
tm.assert_frame_equal(result_df, expected_df)
|
| 342 |
+
|
| 343 |
+
def test_concat_with_key_not_unique(self):
|
| 344 |
+
# GitHub #46519
|
| 345 |
+
df1 = DataFrame({"name": [1]})
|
| 346 |
+
df2 = DataFrame({"name": [2]})
|
| 347 |
+
df3 = DataFrame({"name": [3]})
|
| 348 |
+
df_a = concat([df1, df2, df3], keys=["x", "y", "x"])
|
| 349 |
+
# the warning is caused by indexing unsorted multi-index
|
| 350 |
+
with tm.assert_produces_warning(
|
| 351 |
+
PerformanceWarning, match="indexing past lexsort depth"
|
| 352 |
+
):
|
| 353 |
+
out_a = df_a.loc[("x", 0), :]
|
| 354 |
+
|
| 355 |
+
df_b = DataFrame(
|
| 356 |
+
{"name": [1, 2, 3]}, index=Index([("x", 0), ("y", 0), ("x", 0)])
|
| 357 |
+
)
|
| 358 |
+
with tm.assert_produces_warning(
|
| 359 |
+
PerformanceWarning, match="indexing past lexsort depth"
|
| 360 |
+
):
|
| 361 |
+
out_b = df_b.loc[("x", 0)]
|
| 362 |
+
|
| 363 |
+
tm.assert_frame_equal(out_a, out_b)
|
| 364 |
+
|
| 365 |
+
df1 = DataFrame({"name": ["a", "a", "b"]})
|
| 366 |
+
df2 = DataFrame({"name": ["a", "b"]})
|
| 367 |
+
df3 = DataFrame({"name": ["c", "d"]})
|
| 368 |
+
df_a = concat([df1, df2, df3], keys=["x", "y", "x"])
|
| 369 |
+
with tm.assert_produces_warning(
|
| 370 |
+
PerformanceWarning, match="indexing past lexsort depth"
|
| 371 |
+
):
|
| 372 |
+
out_a = df_a.loc[("x", 0), :]
|
| 373 |
+
|
| 374 |
+
df_b = DataFrame(
|
| 375 |
+
{
|
| 376 |
+
"a": ["x", "x", "x", "y", "y", "x", "x"],
|
| 377 |
+
"b": [0, 1, 2, 0, 1, 0, 1],
|
| 378 |
+
"name": list("aababcd"),
|
| 379 |
+
}
|
| 380 |
+
).set_index(["a", "b"])
|
| 381 |
+
df_b.index.names = [None, None]
|
| 382 |
+
with tm.assert_produces_warning(
|
| 383 |
+
PerformanceWarning, match="indexing past lexsort depth"
|
| 384 |
+
):
|
| 385 |
+
out_b = df_b.loc[("x", 0), :]
|
| 386 |
+
|
| 387 |
+
tm.assert_frame_equal(out_a, out_b)
|
| 388 |
+
|
| 389 |
+
def test_concat_with_duplicated_levels(self):
|
| 390 |
+
# keyword levels should be unique
|
| 391 |
+
df1 = DataFrame({"A": [1]}, index=["x"])
|
| 392 |
+
df2 = DataFrame({"A": [1]}, index=["y"])
|
| 393 |
+
msg = r"Level values not unique: \['x', 'y', 'y'\]"
|
| 394 |
+
with pytest.raises(ValueError, match=msg):
|
| 395 |
+
concat([df1, df2], keys=["x", "y"], levels=[["x", "y", "y"]])
|
| 396 |
+
|
| 397 |
+
@pytest.mark.parametrize("levels", [[["x", "y"]], [["x", "y", "y"]]])
|
| 398 |
+
def test_concat_with_levels_with_none_keys(self, levels):
|
| 399 |
+
df1 = DataFrame({"A": [1]}, index=["x"])
|
| 400 |
+
df2 = DataFrame({"A": [1]}, index=["y"])
|
| 401 |
+
msg = "levels supported only when keys is not None"
|
| 402 |
+
with pytest.raises(ValueError, match=msg):
|
| 403 |
+
concat([df1, df2], levels=levels)
|
| 404 |
+
|
| 405 |
+
def test_concat_range_index_result(self):
|
| 406 |
+
# GH#47501
|
| 407 |
+
df1 = DataFrame({"a": [1, 2]})
|
| 408 |
+
df2 = DataFrame({"b": [1, 2]})
|
| 409 |
+
|
| 410 |
+
result = concat([df1, df2], sort=True, axis=1)
|
| 411 |
+
expected = DataFrame({"a": [1, 2], "b": [1, 2]})
|
| 412 |
+
tm.assert_frame_equal(result, expected)
|
| 413 |
+
expected_index = pd.RangeIndex(0, 2)
|
| 414 |
+
tm.assert_index_equal(result.index, expected_index, exact=True)
|
| 415 |
+
|
| 416 |
+
def test_concat_index_keep_dtype(self):
|
| 417 |
+
# GH#47329
|
| 418 |
+
df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object"))
|
| 419 |
+
df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object"))
|
| 420 |
+
result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
|
| 421 |
+
expected = DataFrame(
|
| 422 |
+
[[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object")
|
| 423 |
+
)
|
| 424 |
+
tm.assert_frame_equal(result, expected)
|
| 425 |
+
|
| 426 |
+
def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype):
|
| 427 |
+
# GH#47329
|
| 428 |
+
df1 = DataFrame(
|
| 429 |
+
[[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype)
|
| 430 |
+
)
|
| 431 |
+
df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype))
|
| 432 |
+
result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
|
| 433 |
+
expected = DataFrame(
|
| 434 |
+
[[0, 1, 1.0], [0, 1, np.nan]],
|
| 435 |
+
columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype),
|
| 436 |
+
)
|
| 437 |
+
tm.assert_frame_equal(result, expected)
|
| 438 |
+
|
| 439 |
+
@pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"])
|
| 440 |
+
def test_concat_index_find_common(self, dtype):
|
| 441 |
+
# GH#47329
|
| 442 |
+
df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype))
|
| 443 |
+
df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32"))
|
| 444 |
+
result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
|
| 445 |
+
expected = DataFrame(
|
| 446 |
+
[[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32")
|
| 447 |
+
)
|
| 448 |
+
tm.assert_frame_equal(result, expected)
|
| 449 |
+
|
| 450 |
+
def test_concat_axis_1_sort_false_rangeindex(self, using_infer_string):
|
| 451 |
+
# GH 46675
|
| 452 |
+
s1 = Series(["a", "b", "c"])
|
| 453 |
+
s2 = Series(["a", "b"])
|
| 454 |
+
s3 = Series(["a", "b", "c", "d"])
|
| 455 |
+
s4 = Series(
|
| 456 |
+
[], dtype=object if not using_infer_string else "string[pyarrow_numpy]"
|
| 457 |
+
)
|
| 458 |
+
result = concat(
|
| 459 |
+
[s1, s2, s3, s4], sort=False, join="outer", ignore_index=False, axis=1
|
| 460 |
+
)
|
| 461 |
+
expected = DataFrame(
|
| 462 |
+
[
|
| 463 |
+
["a"] * 3 + [np.nan],
|
| 464 |
+
["b"] * 3 + [np.nan],
|
| 465 |
+
["c", np.nan] * 2,
|
| 466 |
+
[np.nan] * 2 + ["d"] + [np.nan],
|
| 467 |
+
],
|
| 468 |
+
dtype=object if not using_infer_string else "string[pyarrow_numpy]",
|
| 469 |
+
)
|
| 470 |
+
tm.assert_frame_equal(
|
| 471 |
+
result, expected, check_index_type=True, check_column_type=True
|
| 472 |
+
)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from io import StringIO
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
concat,
|
| 9 |
+
read_csv,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestInvalidConcat:
|
| 15 |
+
@pytest.mark.parametrize("obj", [1, {}, [1, 2], (1, 2)])
|
| 16 |
+
def test_concat_invalid(self, obj):
|
| 17 |
+
# trying to concat a ndframe with a non-ndframe
|
| 18 |
+
df1 = DataFrame(range(2))
|
| 19 |
+
msg = (
|
| 20 |
+
f"cannot concatenate object of type '{type(obj)}'; "
|
| 21 |
+
"only Series and DataFrame objs are valid"
|
| 22 |
+
)
|
| 23 |
+
with pytest.raises(TypeError, match=msg):
|
| 24 |
+
concat([df1, obj])
|
| 25 |
+
|
| 26 |
+
def test_concat_invalid_first_argument(self):
|
| 27 |
+
df1 = DataFrame(range(2))
|
| 28 |
+
msg = (
|
| 29 |
+
"first argument must be an iterable of pandas "
|
| 30 |
+
'objects, you passed an object of type "DataFrame"'
|
| 31 |
+
)
|
| 32 |
+
with pytest.raises(TypeError, match=msg):
|
| 33 |
+
concat(df1)
|
| 34 |
+
|
| 35 |
+
def test_concat_generator_obj(self):
|
| 36 |
+
# generator ok though
|
| 37 |
+
concat(DataFrame(np.random.default_rng(2).random((5, 5))) for _ in range(3))
|
| 38 |
+
|
| 39 |
+
def test_concat_textreader_obj(self):
|
| 40 |
+
# text reader ok
|
| 41 |
+
# GH6583
|
| 42 |
+
data = """index,A,B,C,D
|
| 43 |
+
foo,2,3,4,5
|
| 44 |
+
bar,7,8,9,10
|
| 45 |
+
baz,12,13,14,15
|
| 46 |
+
qux,12,13,14,15
|
| 47 |
+
foo2,12,13,14,15
|
| 48 |
+
bar2,12,13,14,15
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
with read_csv(StringIO(data), chunksize=1) as reader:
|
| 52 |
+
result = concat(reader, ignore_index=True)
|
| 53 |
+
expected = read_csv(StringIO(data))
|
| 54 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
DatetimeIndex,
|
| 7 |
+
Index,
|
| 8 |
+
MultiIndex,
|
| 9 |
+
Series,
|
| 10 |
+
concat,
|
| 11 |
+
date_range,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TestSeriesConcat:
|
| 17 |
+
def test_concat_series(self):
|
| 18 |
+
ts = Series(
|
| 19 |
+
np.arange(20, dtype=np.float64),
|
| 20 |
+
index=date_range("2020-01-01", periods=20),
|
| 21 |
+
name="foo",
|
| 22 |
+
)
|
| 23 |
+
ts.name = "foo"
|
| 24 |
+
|
| 25 |
+
pieces = [ts[:5], ts[5:15], ts[15:]]
|
| 26 |
+
|
| 27 |
+
result = concat(pieces)
|
| 28 |
+
tm.assert_series_equal(result, ts)
|
| 29 |
+
assert result.name == ts.name
|
| 30 |
+
|
| 31 |
+
result = concat(pieces, keys=[0, 1, 2])
|
| 32 |
+
expected = ts.copy()
|
| 33 |
+
|
| 34 |
+
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
|
| 35 |
+
|
| 36 |
+
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
|
| 37 |
+
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
|
| 38 |
+
expected.index = exp_index
|
| 39 |
+
tm.assert_series_equal(result, expected)
|
| 40 |
+
|
| 41 |
+
def test_concat_empty_and_non_empty_series_regression(self):
|
| 42 |
+
# GH 18187 regression test
|
| 43 |
+
s1 = Series([1])
|
| 44 |
+
s2 = Series([], dtype=object)
|
| 45 |
+
|
| 46 |
+
expected = s1
|
| 47 |
+
msg = "The behavior of array concatenation with empty entries is deprecated"
|
| 48 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 49 |
+
result = concat([s1, s2])
|
| 50 |
+
tm.assert_series_equal(result, expected)
|
| 51 |
+
|
| 52 |
+
def test_concat_series_axis1(self):
|
| 53 |
+
ts = Series(
|
| 54 |
+
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
pieces = [ts[:-2], ts[2:], ts[2:-2]]
|
| 58 |
+
|
| 59 |
+
result = concat(pieces, axis=1)
|
| 60 |
+
expected = DataFrame(pieces).T
|
| 61 |
+
tm.assert_frame_equal(result, expected)
|
| 62 |
+
|
| 63 |
+
result = concat(pieces, keys=["A", "B", "C"], axis=1)
|
| 64 |
+
expected = DataFrame(pieces, index=["A", "B", "C"]).T
|
| 65 |
+
tm.assert_frame_equal(result, expected)
|
| 66 |
+
|
| 67 |
+
def test_concat_series_axis1_preserves_series_names(self):
|
| 68 |
+
# preserve series names, #2489
|
| 69 |
+
s = Series(np.random.default_rng(2).standard_normal(5), name="A")
|
| 70 |
+
s2 = Series(np.random.default_rng(2).standard_normal(5), name="B")
|
| 71 |
+
|
| 72 |
+
result = concat([s, s2], axis=1)
|
| 73 |
+
expected = DataFrame({"A": s, "B": s2})
|
| 74 |
+
tm.assert_frame_equal(result, expected)
|
| 75 |
+
|
| 76 |
+
s2.name = None
|
| 77 |
+
result = concat([s, s2], axis=1)
|
| 78 |
+
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
|
| 79 |
+
|
| 80 |
+
def test_concat_series_axis1_with_reindex(self, sort):
|
| 81 |
+
# must reindex, #2603
|
| 82 |
+
s = Series(
|
| 83 |
+
np.random.default_rng(2).standard_normal(3), index=["c", "a", "b"], name="A"
|
| 84 |
+
)
|
| 85 |
+
s2 = Series(
|
| 86 |
+
np.random.default_rng(2).standard_normal(4),
|
| 87 |
+
index=["d", "a", "b", "c"],
|
| 88 |
+
name="B",
|
| 89 |
+
)
|
| 90 |
+
result = concat([s, s2], axis=1, sort=sort)
|
| 91 |
+
expected = DataFrame({"A": s, "B": s2}, index=["c", "a", "b", "d"])
|
| 92 |
+
if sort:
|
| 93 |
+
expected = expected.sort_index()
|
| 94 |
+
tm.assert_frame_equal(result, expected)
|
| 95 |
+
|
| 96 |
+
def test_concat_series_axis1_names_applied(self):
|
| 97 |
+
# ensure names argument is not ignored on axis=1, #23490
|
| 98 |
+
s = Series([1, 2, 3])
|
| 99 |
+
s2 = Series([4, 5, 6])
|
| 100 |
+
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
|
| 101 |
+
expected = DataFrame(
|
| 102 |
+
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
|
| 103 |
+
)
|
| 104 |
+
tm.assert_frame_equal(result, expected)
|
| 105 |
+
|
| 106 |
+
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
|
| 107 |
+
expected = DataFrame(
|
| 108 |
+
[[1, 4], [2, 5], [3, 6]],
|
| 109 |
+
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
|
| 110 |
+
)
|
| 111 |
+
tm.assert_frame_equal(result, expected)
|
| 112 |
+
|
| 113 |
+
def test_concat_series_axis1_same_names_ignore_index(self):
|
| 114 |
+
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
|
| 115 |
+
s1 = Series(
|
| 116 |
+
np.random.default_rng(2).standard_normal(len(dates)),
|
| 117 |
+
index=dates,
|
| 118 |
+
name="value",
|
| 119 |
+
)
|
| 120 |
+
s2 = Series(
|
| 121 |
+
np.random.default_rng(2).standard_normal(len(dates)),
|
| 122 |
+
index=dates,
|
| 123 |
+
name="value",
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
result = concat([s1, s2], axis=1, ignore_index=True)
|
| 127 |
+
expected = Index(range(2))
|
| 128 |
+
|
| 129 |
+
tm.assert_index_equal(result.columns, expected, exact=True)
|
| 130 |
+
|
| 131 |
+
@pytest.mark.parametrize(
|
| 132 |
+
"s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))]
|
| 133 |
+
)
|
| 134 |
+
def test_concat_series_name_npscalar_tuple(self, s1name, s2name):
|
| 135 |
+
# GH21015
|
| 136 |
+
s1 = Series({"a": 1, "b": 2}, name=s1name)
|
| 137 |
+
s2 = Series({"c": 5, "d": 6}, name=s2name)
|
| 138 |
+
result = concat([s1, s2])
|
| 139 |
+
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
|
| 140 |
+
tm.assert_series_equal(result, expected)
|
| 141 |
+
|
| 142 |
+
def test_concat_series_partial_columns_names(self):
|
| 143 |
+
# GH10698
|
| 144 |
+
named_series = Series([1, 2], name="foo")
|
| 145 |
+
unnamed_series1 = Series([1, 2])
|
| 146 |
+
unnamed_series2 = Series([4, 5])
|
| 147 |
+
|
| 148 |
+
result = concat([named_series, unnamed_series1, unnamed_series2], axis=1)
|
| 149 |
+
expected = DataFrame(
|
| 150 |
+
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
|
| 151 |
+
)
|
| 152 |
+
tm.assert_frame_equal(result, expected)
|
| 153 |
+
|
| 154 |
+
result = concat(
|
| 155 |
+
[named_series, unnamed_series1, unnamed_series2],
|
| 156 |
+
axis=1,
|
| 157 |
+
keys=["red", "blue", "yellow"],
|
| 158 |
+
)
|
| 159 |
+
expected = DataFrame(
|
| 160 |
+
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
|
| 161 |
+
columns=["red", "blue", "yellow"],
|
| 162 |
+
)
|
| 163 |
+
tm.assert_frame_equal(result, expected)
|
| 164 |
+
|
| 165 |
+
result = concat(
|
| 166 |
+
[named_series, unnamed_series1, unnamed_series2], axis=1, ignore_index=True
|
| 167 |
+
)
|
| 168 |
+
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
|
| 169 |
+
tm.assert_frame_equal(result, expected)
|
| 170 |
+
|
| 171 |
+
def test_concat_series_length_one_reversed(self, frame_or_series):
|
| 172 |
+
# GH39401
|
| 173 |
+
obj = frame_or_series([100])
|
| 174 |
+
result = concat([obj.iloc[::-1]])
|
| 175 |
+
tm.assert_equal(result, obj)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py
ADDED
|
@@ -0,0 +1,886 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
CategoricalDtype,
|
| 7 |
+
CategoricalIndex,
|
| 8 |
+
DataFrame,
|
| 9 |
+
Index,
|
| 10 |
+
MultiIndex,
|
| 11 |
+
Series,
|
| 12 |
+
crosstab,
|
| 13 |
+
)
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@pytest.fixture
|
| 18 |
+
def df():
|
| 19 |
+
df = DataFrame(
|
| 20 |
+
{
|
| 21 |
+
"A": [
|
| 22 |
+
"foo",
|
| 23 |
+
"foo",
|
| 24 |
+
"foo",
|
| 25 |
+
"foo",
|
| 26 |
+
"bar",
|
| 27 |
+
"bar",
|
| 28 |
+
"bar",
|
| 29 |
+
"bar",
|
| 30 |
+
"foo",
|
| 31 |
+
"foo",
|
| 32 |
+
"foo",
|
| 33 |
+
],
|
| 34 |
+
"B": [
|
| 35 |
+
"one",
|
| 36 |
+
"one",
|
| 37 |
+
"one",
|
| 38 |
+
"two",
|
| 39 |
+
"one",
|
| 40 |
+
"one",
|
| 41 |
+
"one",
|
| 42 |
+
"two",
|
| 43 |
+
"two",
|
| 44 |
+
"two",
|
| 45 |
+
"one",
|
| 46 |
+
],
|
| 47 |
+
"C": [
|
| 48 |
+
"dull",
|
| 49 |
+
"dull",
|
| 50 |
+
"shiny",
|
| 51 |
+
"dull",
|
| 52 |
+
"dull",
|
| 53 |
+
"shiny",
|
| 54 |
+
"shiny",
|
| 55 |
+
"dull",
|
| 56 |
+
"shiny",
|
| 57 |
+
"shiny",
|
| 58 |
+
"shiny",
|
| 59 |
+
],
|
| 60 |
+
"D": np.random.default_rng(2).standard_normal(11),
|
| 61 |
+
"E": np.random.default_rng(2).standard_normal(11),
|
| 62 |
+
"F": np.random.default_rng(2).standard_normal(11),
|
| 63 |
+
}
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
return pd.concat([df, df], ignore_index=True)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class TestCrosstab:
|
| 70 |
+
def test_crosstab_single(self, df):
|
| 71 |
+
result = crosstab(df["A"], df["C"])
|
| 72 |
+
expected = df.groupby(["A", "C"]).size().unstack()
|
| 73 |
+
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
|
| 74 |
+
|
| 75 |
+
def test_crosstab_multiple(self, df):
|
| 76 |
+
result = crosstab(df["A"], [df["B"], df["C"]])
|
| 77 |
+
expected = df.groupby(["A", "B", "C"]).size()
|
| 78 |
+
expected = expected.unstack("B").unstack("C").fillna(0).astype(np.int64)
|
| 79 |
+
tm.assert_frame_equal(result, expected)
|
| 80 |
+
|
| 81 |
+
result = crosstab([df["B"], df["C"]], df["A"])
|
| 82 |
+
expected = df.groupby(["B", "C", "A"]).size()
|
| 83 |
+
expected = expected.unstack("A").fillna(0).astype(np.int64)
|
| 84 |
+
tm.assert_frame_equal(result, expected)
|
| 85 |
+
|
| 86 |
+
@pytest.mark.parametrize("box", [np.array, list, tuple])
|
| 87 |
+
def test_crosstab_ndarray(self, box):
|
| 88 |
+
# GH 44076
|
| 89 |
+
a = box(np.random.default_rng(2).integers(0, 5, size=100))
|
| 90 |
+
b = box(np.random.default_rng(2).integers(0, 3, size=100))
|
| 91 |
+
c = box(np.random.default_rng(2).integers(0, 10, size=100))
|
| 92 |
+
|
| 93 |
+
df = DataFrame({"a": a, "b": b, "c": c})
|
| 94 |
+
|
| 95 |
+
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"))
|
| 96 |
+
expected = crosstab(df["a"], [df["b"], df["c"]])
|
| 97 |
+
tm.assert_frame_equal(result, expected)
|
| 98 |
+
|
| 99 |
+
result = crosstab([b, c], a, colnames=["a"], rownames=("b", "c"))
|
| 100 |
+
expected = crosstab([df["b"], df["c"]], df["a"])
|
| 101 |
+
tm.assert_frame_equal(result, expected)
|
| 102 |
+
|
| 103 |
+
# assign arbitrary names
|
| 104 |
+
result = crosstab(a, c)
|
| 105 |
+
expected = crosstab(df["a"], df["c"])
|
| 106 |
+
expected.index.names = ["row_0"]
|
| 107 |
+
expected.columns.names = ["col_0"]
|
| 108 |
+
tm.assert_frame_equal(result, expected)
|
| 109 |
+
|
| 110 |
+
def test_crosstab_non_aligned(self):
|
| 111 |
+
# GH 17005
|
| 112 |
+
a = Series([0, 1, 1], index=["a", "b", "c"])
|
| 113 |
+
b = Series([3, 4, 3, 4, 3], index=["a", "b", "c", "d", "f"])
|
| 114 |
+
c = np.array([3, 4, 3], dtype=np.int64)
|
| 115 |
+
|
| 116 |
+
expected = DataFrame(
|
| 117 |
+
[[1, 0], [1, 1]],
|
| 118 |
+
index=Index([0, 1], name="row_0"),
|
| 119 |
+
columns=Index([3, 4], name="col_0"),
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
result = crosstab(a, b)
|
| 123 |
+
tm.assert_frame_equal(result, expected)
|
| 124 |
+
|
| 125 |
+
result = crosstab(a, c)
|
| 126 |
+
tm.assert_frame_equal(result, expected)
|
| 127 |
+
|
| 128 |
+
def test_crosstab_margins(self):
|
| 129 |
+
a = np.random.default_rng(2).integers(0, 7, size=100)
|
| 130 |
+
b = np.random.default_rng(2).integers(0, 3, size=100)
|
| 131 |
+
c = np.random.default_rng(2).integers(0, 5, size=100)
|
| 132 |
+
|
| 133 |
+
df = DataFrame({"a": a, "b": b, "c": c})
|
| 134 |
+
|
| 135 |
+
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True)
|
| 136 |
+
|
| 137 |
+
assert result.index.names == ("a",)
|
| 138 |
+
assert result.columns.names == ["b", "c"]
|
| 139 |
+
|
| 140 |
+
all_cols = result["All", ""]
|
| 141 |
+
exp_cols = df.groupby(["a"]).size().astype("i8")
|
| 142 |
+
# to keep index.name
|
| 143 |
+
exp_margin = Series([len(df)], index=Index(["All"], name="a"))
|
| 144 |
+
exp_cols = pd.concat([exp_cols, exp_margin])
|
| 145 |
+
exp_cols.name = ("All", "")
|
| 146 |
+
|
| 147 |
+
tm.assert_series_equal(all_cols, exp_cols)
|
| 148 |
+
|
| 149 |
+
all_rows = result.loc["All"]
|
| 150 |
+
exp_rows = df.groupby(["b", "c"]).size().astype("i8")
|
| 151 |
+
exp_rows = pd.concat([exp_rows, Series([len(df)], index=[("All", "")])])
|
| 152 |
+
exp_rows.name = "All"
|
| 153 |
+
|
| 154 |
+
exp_rows = exp_rows.reindex(all_rows.index)
|
| 155 |
+
exp_rows = exp_rows.fillna(0).astype(np.int64)
|
| 156 |
+
tm.assert_series_equal(all_rows, exp_rows)
|
| 157 |
+
|
| 158 |
+
def test_crosstab_margins_set_margin_name(self):
|
| 159 |
+
# GH 15972
|
| 160 |
+
a = np.random.default_rng(2).integers(0, 7, size=100)
|
| 161 |
+
b = np.random.default_rng(2).integers(0, 3, size=100)
|
| 162 |
+
c = np.random.default_rng(2).integers(0, 5, size=100)
|
| 163 |
+
|
| 164 |
+
df = DataFrame({"a": a, "b": b, "c": c})
|
| 165 |
+
|
| 166 |
+
result = crosstab(
|
| 167 |
+
a,
|
| 168 |
+
[b, c],
|
| 169 |
+
rownames=["a"],
|
| 170 |
+
colnames=("b", "c"),
|
| 171 |
+
margins=True,
|
| 172 |
+
margins_name="TOTAL",
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
assert result.index.names == ("a",)
|
| 176 |
+
assert result.columns.names == ["b", "c"]
|
| 177 |
+
|
| 178 |
+
all_cols = result["TOTAL", ""]
|
| 179 |
+
exp_cols = df.groupby(["a"]).size().astype("i8")
|
| 180 |
+
# to keep index.name
|
| 181 |
+
exp_margin = Series([len(df)], index=Index(["TOTAL"], name="a"))
|
| 182 |
+
exp_cols = pd.concat([exp_cols, exp_margin])
|
| 183 |
+
exp_cols.name = ("TOTAL", "")
|
| 184 |
+
|
| 185 |
+
tm.assert_series_equal(all_cols, exp_cols)
|
| 186 |
+
|
| 187 |
+
all_rows = result.loc["TOTAL"]
|
| 188 |
+
exp_rows = df.groupby(["b", "c"]).size().astype("i8")
|
| 189 |
+
exp_rows = pd.concat([exp_rows, Series([len(df)], index=[("TOTAL", "")])])
|
| 190 |
+
exp_rows.name = "TOTAL"
|
| 191 |
+
|
| 192 |
+
exp_rows = exp_rows.reindex(all_rows.index)
|
| 193 |
+
exp_rows = exp_rows.fillna(0).astype(np.int64)
|
| 194 |
+
tm.assert_series_equal(all_rows, exp_rows)
|
| 195 |
+
|
| 196 |
+
msg = "margins_name argument must be a string"
|
| 197 |
+
for margins_name in [666, None, ["a", "b"]]:
|
| 198 |
+
with pytest.raises(ValueError, match=msg):
|
| 199 |
+
crosstab(
|
| 200 |
+
a,
|
| 201 |
+
[b, c],
|
| 202 |
+
rownames=["a"],
|
| 203 |
+
colnames=("b", "c"),
|
| 204 |
+
margins=True,
|
| 205 |
+
margins_name=margins_name,
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
def test_crosstab_pass_values(self):
|
| 209 |
+
a = np.random.default_rng(2).integers(0, 7, size=100)
|
| 210 |
+
b = np.random.default_rng(2).integers(0, 3, size=100)
|
| 211 |
+
c = np.random.default_rng(2).integers(0, 5, size=100)
|
| 212 |
+
values = np.random.default_rng(2).standard_normal(100)
|
| 213 |
+
|
| 214 |
+
table = crosstab(
|
| 215 |
+
[a, b], c, values, aggfunc="sum", rownames=["foo", "bar"], colnames=["baz"]
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
+
df = DataFrame({"foo": a, "bar": b, "baz": c, "values": values})
|
| 219 |
+
|
| 220 |
+
expected = df.pivot_table(
|
| 221 |
+
"values", index=["foo", "bar"], columns="baz", aggfunc="sum"
|
| 222 |
+
)
|
| 223 |
+
tm.assert_frame_equal(table, expected)
|
| 224 |
+
|
| 225 |
+
def test_crosstab_dropna(self):
|
| 226 |
+
# GH 3820
|
| 227 |
+
a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
|
| 228 |
+
b = np.array(["one", "one", "two", "one", "two", "two", "two"], dtype=object)
|
| 229 |
+
c = np.array(
|
| 230 |
+
["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
|
| 231 |
+
)
|
| 232 |
+
res = crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"], dropna=False)
|
| 233 |
+
m = MultiIndex.from_tuples(
|
| 234 |
+
[("one", "dull"), ("one", "shiny"), ("two", "dull"), ("two", "shiny")],
|
| 235 |
+
names=["b", "c"],
|
| 236 |
+
)
|
| 237 |
+
tm.assert_index_equal(res.columns, m)
|
| 238 |
+
|
| 239 |
+
def test_crosstab_no_overlap(self):
|
| 240 |
+
# GS 10291
|
| 241 |
+
|
| 242 |
+
s1 = Series([1, 2, 3], index=[1, 2, 3])
|
| 243 |
+
s2 = Series([4, 5, 6], index=[4, 5, 6])
|
| 244 |
+
|
| 245 |
+
actual = crosstab(s1, s2)
|
| 246 |
+
expected = DataFrame(
|
| 247 |
+
index=Index([], dtype="int64", name="row_0"),
|
| 248 |
+
columns=Index([], dtype="int64", name="col_0"),
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
tm.assert_frame_equal(actual, expected)
|
| 252 |
+
|
| 253 |
+
def test_margin_dropna(self):
|
| 254 |
+
# GH 12577
|
| 255 |
+
# pivot_table counts null into margin ('All')
|
| 256 |
+
# when margins=true and dropna=true
|
| 257 |
+
|
| 258 |
+
df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
|
| 259 |
+
actual = crosstab(df.a, df.b, margins=True, dropna=True)
|
| 260 |
+
expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
|
| 261 |
+
expected.index = Index([1.0, 2.0, "All"], name="a")
|
| 262 |
+
expected.columns = Index([3, 4, "All"], name="b")
|
| 263 |
+
tm.assert_frame_equal(actual, expected)
|
| 264 |
+
|
| 265 |
+
def test_margin_dropna2(self):
|
| 266 |
+
df = DataFrame(
|
| 267 |
+
{"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
|
| 268 |
+
)
|
| 269 |
+
actual = crosstab(df.a, df.b, margins=True, dropna=True)
|
| 270 |
+
expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
|
| 271 |
+
expected.index = Index([1.0, 2.0, "All"], name="a")
|
| 272 |
+
expected.columns = Index([3.0, 4.0, "All"], name="b")
|
| 273 |
+
tm.assert_frame_equal(actual, expected)
|
| 274 |
+
|
| 275 |
+
def test_margin_dropna3(self):
|
| 276 |
+
df = DataFrame(
|
| 277 |
+
{"a": [1, np.nan, np.nan, np.nan, np.nan, 2], "b": [3, 3, 4, 4, 4, 4]}
|
| 278 |
+
)
|
| 279 |
+
actual = crosstab(df.a, df.b, margins=True, dropna=True)
|
| 280 |
+
expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
|
| 281 |
+
expected.index = Index([1.0, 2.0, "All"], name="a")
|
| 282 |
+
expected.columns = Index([3, 4, "All"], name="b")
|
| 283 |
+
tm.assert_frame_equal(actual, expected)
|
| 284 |
+
|
| 285 |
+
def test_margin_dropna4(self):
|
| 286 |
+
# GH 12642
|
| 287 |
+
# _add_margins raises KeyError: Level None not found
|
| 288 |
+
# when margins=True and dropna=False
|
| 289 |
+
# GH: 10772: Keep np.nan in result with dropna=False
|
| 290 |
+
df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
|
| 291 |
+
actual = crosstab(df.a, df.b, margins=True, dropna=False)
|
| 292 |
+
expected = DataFrame([[1, 0, 1.0], [1, 3, 4.0], [0, 1, np.nan], [2, 4, 6.0]])
|
| 293 |
+
expected.index = Index([1.0, 2.0, np.nan, "All"], name="a")
|
| 294 |
+
expected.columns = Index([3, 4, "All"], name="b")
|
| 295 |
+
tm.assert_frame_equal(actual, expected)
|
| 296 |
+
|
| 297 |
+
def test_margin_dropna5(self):
|
| 298 |
+
# GH: 10772: Keep np.nan in result with dropna=False
|
| 299 |
+
df = DataFrame(
|
| 300 |
+
{"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
|
| 301 |
+
)
|
| 302 |
+
actual = crosstab(df.a, df.b, margins=True, dropna=False)
|
| 303 |
+
expected = DataFrame(
|
| 304 |
+
[[1, 0, 0, 1.0], [0, 1, 0, 1.0], [0, 3, 1, np.nan], [1, 4, 0, 6.0]]
|
| 305 |
+
)
|
| 306 |
+
expected.index = Index([1.0, 2.0, np.nan, "All"], name="a")
|
| 307 |
+
expected.columns = Index([3.0, 4.0, np.nan, "All"], name="b")
|
| 308 |
+
tm.assert_frame_equal(actual, expected)
|
| 309 |
+
|
| 310 |
+
def test_margin_dropna6(self):
|
| 311 |
+
# GH: 10772: Keep np.nan in result with dropna=False
|
| 312 |
+
a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
|
| 313 |
+
b = np.array(["one", "one", "two", "one", "two", np.nan, "two"], dtype=object)
|
| 314 |
+
c = np.array(
|
| 315 |
+
["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
|
| 316 |
+
)
|
| 317 |
+
|
| 318 |
+
actual = crosstab(
|
| 319 |
+
a, [b, c], rownames=["a"], colnames=["b", "c"], margins=True, dropna=False
|
| 320 |
+
)
|
| 321 |
+
m = MultiIndex.from_arrays(
|
| 322 |
+
[
|
| 323 |
+
["one", "one", "two", "two", np.nan, np.nan, "All"],
|
| 324 |
+
["dull", "shiny", "dull", "shiny", "dull", "shiny", ""],
|
| 325 |
+
],
|
| 326 |
+
names=["b", "c"],
|
| 327 |
+
)
|
| 328 |
+
expected = DataFrame(
|
| 329 |
+
[[1, 0, 1, 0, 0, 0, 2], [2, 0, 1, 1, 0, 1, 5], [3, 0, 2, 1, 0, 0, 7]],
|
| 330 |
+
columns=m,
|
| 331 |
+
)
|
| 332 |
+
expected.index = Index(["bar", "foo", "All"], name="a")
|
| 333 |
+
tm.assert_frame_equal(actual, expected)
|
| 334 |
+
|
| 335 |
+
actual = crosstab(
|
| 336 |
+
[a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=False
|
| 337 |
+
)
|
| 338 |
+
m = MultiIndex.from_arrays(
|
| 339 |
+
[
|
| 340 |
+
["bar", "bar", "bar", "foo", "foo", "foo", "All"],
|
| 341 |
+
["one", "two", np.nan, "one", "two", np.nan, ""],
|
| 342 |
+
],
|
| 343 |
+
names=["a", "b"],
|
| 344 |
+
)
|
| 345 |
+
expected = DataFrame(
|
| 346 |
+
[
|
| 347 |
+
[1, 0, 1.0],
|
| 348 |
+
[1, 0, 1.0],
|
| 349 |
+
[0, 0, np.nan],
|
| 350 |
+
[2, 0, 2.0],
|
| 351 |
+
[1, 1, 2.0],
|
| 352 |
+
[0, 1, np.nan],
|
| 353 |
+
[5, 2, 7.0],
|
| 354 |
+
],
|
| 355 |
+
index=m,
|
| 356 |
+
)
|
| 357 |
+
expected.columns = Index(["dull", "shiny", "All"], name="c")
|
| 358 |
+
tm.assert_frame_equal(actual, expected)
|
| 359 |
+
|
| 360 |
+
actual = crosstab(
|
| 361 |
+
[a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=True
|
| 362 |
+
)
|
| 363 |
+
m = MultiIndex.from_arrays(
|
| 364 |
+
[["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
|
| 365 |
+
names=["a", "b"],
|
| 366 |
+
)
|
| 367 |
+
expected = DataFrame(
|
| 368 |
+
[[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m
|
| 369 |
+
)
|
| 370 |
+
expected.columns = Index(["dull", "shiny", "All"], name="c")
|
| 371 |
+
tm.assert_frame_equal(actual, expected)
|
| 372 |
+
|
| 373 |
+
def test_crosstab_normalize(self):
|
| 374 |
+
# Issue 12578
|
| 375 |
+
df = DataFrame(
|
| 376 |
+
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
|
| 377 |
+
)
|
| 378 |
+
|
| 379 |
+
rindex = Index([1, 2], name="a")
|
| 380 |
+
cindex = Index([3, 4], name="b")
|
| 381 |
+
full_normal = DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex)
|
| 382 |
+
row_normal = DataFrame([[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex)
|
| 383 |
+
col_normal = DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex)
|
| 384 |
+
|
| 385 |
+
# Check all normalize args
|
| 386 |
+
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="all"), full_normal)
|
| 387 |
+
tm.assert_frame_equal(crosstab(df.a, df.b, normalize=True), full_normal)
|
| 388 |
+
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="index"), row_normal)
|
| 389 |
+
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="columns"), col_normal)
|
| 390 |
+
tm.assert_frame_equal(
|
| 391 |
+
crosstab(df.a, df.b, normalize=1),
|
| 392 |
+
crosstab(df.a, df.b, normalize="columns"),
|
| 393 |
+
)
|
| 394 |
+
tm.assert_frame_equal(
|
| 395 |
+
crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index")
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
row_normal_margins = DataFrame(
|
| 399 |
+
[[1.0, 0], [0.25, 0.75], [0.4, 0.6]],
|
| 400 |
+
index=Index([1, 2, "All"], name="a", dtype="object"),
|
| 401 |
+
columns=Index([3, 4], name="b", dtype="object"),
|
| 402 |
+
)
|
| 403 |
+
col_normal_margins = DataFrame(
|
| 404 |
+
[[0.5, 0, 0.2], [0.5, 1.0, 0.8]],
|
| 405 |
+
index=Index([1, 2], name="a", dtype="object"),
|
| 406 |
+
columns=Index([3, 4, "All"], name="b", dtype="object"),
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
all_normal_margins = DataFrame(
|
| 410 |
+
[[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]],
|
| 411 |
+
index=Index([1, 2, "All"], name="a", dtype="object"),
|
| 412 |
+
columns=Index([3, 4, "All"], name="b", dtype="object"),
|
| 413 |
+
)
|
| 414 |
+
tm.assert_frame_equal(
|
| 415 |
+
crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins
|
| 416 |
+
)
|
| 417 |
+
tm.assert_frame_equal(
|
| 418 |
+
crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins
|
| 419 |
+
)
|
| 420 |
+
tm.assert_frame_equal(
|
| 421 |
+
crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins
|
| 422 |
+
)
|
| 423 |
+
|
| 424 |
+
def test_crosstab_normalize_arrays(self):
|
| 425 |
+
# GH#12578
|
| 426 |
+
df = DataFrame(
|
| 427 |
+
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
# Test arrays
|
| 431 |
+
crosstab(
|
| 432 |
+
[np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2])
|
| 433 |
+
)
|
| 434 |
+
|
| 435 |
+
# Test with aggfunc
|
| 436 |
+
norm_counts = DataFrame(
|
| 437 |
+
[[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]],
|
| 438 |
+
index=Index([1, 2, "All"], name="a", dtype="object"),
|
| 439 |
+
columns=Index([3, 4, "All"], name="b"),
|
| 440 |
+
)
|
| 441 |
+
test_case = crosstab(
|
| 442 |
+
df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True
|
| 443 |
+
)
|
| 444 |
+
tm.assert_frame_equal(test_case, norm_counts)
|
| 445 |
+
|
| 446 |
+
df = DataFrame(
|
| 447 |
+
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]}
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
norm_sum = DataFrame(
|
| 451 |
+
[[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]],
|
| 452 |
+
index=Index([1, 2, "All"], name="a", dtype="object"),
|
| 453 |
+
columns=Index([3, 4, "All"], name="b", dtype="object"),
|
| 454 |
+
)
|
| 455 |
+
msg = "using DataFrameGroupBy.sum"
|
| 456 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 457 |
+
test_case = crosstab(
|
| 458 |
+
df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True
|
| 459 |
+
)
|
| 460 |
+
tm.assert_frame_equal(test_case, norm_sum)
|
| 461 |
+
|
| 462 |
+
def test_crosstab_with_empties(self, using_array_manager):
|
| 463 |
+
# Check handling of empties
|
| 464 |
+
df = DataFrame(
|
| 465 |
+
{
|
| 466 |
+
"a": [1, 2, 2, 2, 2],
|
| 467 |
+
"b": [3, 3, 4, 4, 4],
|
| 468 |
+
"c": [np.nan, np.nan, np.nan, np.nan, np.nan],
|
| 469 |
+
}
|
| 470 |
+
)
|
| 471 |
+
|
| 472 |
+
empty = DataFrame(
|
| 473 |
+
[[0.0, 0.0], [0.0, 0.0]],
|
| 474 |
+
index=Index([1, 2], name="a", dtype="int64"),
|
| 475 |
+
columns=Index([3, 4], name="b"),
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
for i in [True, "index", "columns"]:
|
| 479 |
+
calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=i)
|
| 480 |
+
tm.assert_frame_equal(empty, calculated)
|
| 481 |
+
|
| 482 |
+
nans = DataFrame(
|
| 483 |
+
[[0.0, np.nan], [0.0, 0.0]],
|
| 484 |
+
index=Index([1, 2], name="a", dtype="int64"),
|
| 485 |
+
columns=Index([3, 4], name="b"),
|
| 486 |
+
)
|
| 487 |
+
if using_array_manager:
|
| 488 |
+
# INFO(ArrayManager) column without NaNs can preserve int dtype
|
| 489 |
+
nans[3] = nans[3].astype("int64")
|
| 490 |
+
|
| 491 |
+
calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False)
|
| 492 |
+
tm.assert_frame_equal(nans, calculated)
|
| 493 |
+
|
| 494 |
+
def test_crosstab_errors(self):
|
| 495 |
+
# Issue 12578
|
| 496 |
+
|
| 497 |
+
df = DataFrame(
|
| 498 |
+
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
|
| 499 |
+
)
|
| 500 |
+
|
| 501 |
+
error = "values cannot be used without an aggfunc."
|
| 502 |
+
with pytest.raises(ValueError, match=error):
|
| 503 |
+
crosstab(df.a, df.b, values=df.c)
|
| 504 |
+
|
| 505 |
+
error = "aggfunc cannot be used without values"
|
| 506 |
+
with pytest.raises(ValueError, match=error):
|
| 507 |
+
crosstab(df.a, df.b, aggfunc=np.mean)
|
| 508 |
+
|
| 509 |
+
error = "Not a valid normalize argument"
|
| 510 |
+
with pytest.raises(ValueError, match=error):
|
| 511 |
+
crosstab(df.a, df.b, normalize="42")
|
| 512 |
+
|
| 513 |
+
with pytest.raises(ValueError, match=error):
|
| 514 |
+
crosstab(df.a, df.b, normalize=42)
|
| 515 |
+
|
| 516 |
+
error = "Not a valid margins argument"
|
| 517 |
+
with pytest.raises(ValueError, match=error):
|
| 518 |
+
crosstab(df.a, df.b, normalize="all", margins=42)
|
| 519 |
+
|
| 520 |
+
def test_crosstab_with_categorial_columns(self):
|
| 521 |
+
# GH 8860
|
| 522 |
+
df = DataFrame(
|
| 523 |
+
{
|
| 524 |
+
"MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"],
|
| 525 |
+
"MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"],
|
| 526 |
+
}
|
| 527 |
+
)
|
| 528 |
+
categories = ["Sedan", "Electric", "Pickup"]
|
| 529 |
+
df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories)
|
| 530 |
+
result = crosstab(df["MAKE"], df["MODEL"])
|
| 531 |
+
|
| 532 |
+
expected_index = Index(["Acura", "Honda", "Tesla"], name="MAKE")
|
| 533 |
+
expected_columns = CategoricalIndex(
|
| 534 |
+
categories, categories=categories, ordered=False, name="MODEL"
|
| 535 |
+
)
|
| 536 |
+
expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]
|
| 537 |
+
expected = DataFrame(
|
| 538 |
+
expected_data, index=expected_index, columns=expected_columns
|
| 539 |
+
)
|
| 540 |
+
tm.assert_frame_equal(result, expected)
|
| 541 |
+
|
| 542 |
+
def test_crosstab_with_numpy_size(self):
|
| 543 |
+
# GH 4003
|
| 544 |
+
df = DataFrame(
|
| 545 |
+
{
|
| 546 |
+
"A": ["one", "one", "two", "three"] * 6,
|
| 547 |
+
"B": ["A", "B", "C"] * 8,
|
| 548 |
+
"C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
|
| 549 |
+
"D": np.random.default_rng(2).standard_normal(24),
|
| 550 |
+
"E": np.random.default_rng(2).standard_normal(24),
|
| 551 |
+
}
|
| 552 |
+
)
|
| 553 |
+
result = crosstab(
|
| 554 |
+
index=[df["A"], df["B"]],
|
| 555 |
+
columns=[df["C"]],
|
| 556 |
+
margins=True,
|
| 557 |
+
aggfunc=np.size,
|
| 558 |
+
values=df["D"],
|
| 559 |
+
)
|
| 560 |
+
expected_index = MultiIndex(
|
| 561 |
+
levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]],
|
| 562 |
+
codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],
|
| 563 |
+
names=["A", "B"],
|
| 564 |
+
)
|
| 565 |
+
expected_column = Index(["bar", "foo", "All"], name="C")
|
| 566 |
+
expected_data = np.array(
|
| 567 |
+
[
|
| 568 |
+
[2.0, 2.0, 4.0],
|
| 569 |
+
[2.0, 2.0, 4.0],
|
| 570 |
+
[2.0, 2.0, 4.0],
|
| 571 |
+
[2.0, np.nan, 2.0],
|
| 572 |
+
[np.nan, 2.0, 2.0],
|
| 573 |
+
[2.0, np.nan, 2.0],
|
| 574 |
+
[np.nan, 2.0, 2.0],
|
| 575 |
+
[2.0, np.nan, 2.0],
|
| 576 |
+
[np.nan, 2.0, 2.0],
|
| 577 |
+
[12.0, 12.0, 24.0],
|
| 578 |
+
]
|
| 579 |
+
)
|
| 580 |
+
expected = DataFrame(
|
| 581 |
+
expected_data, index=expected_index, columns=expected_column
|
| 582 |
+
)
|
| 583 |
+
# aggfunc is np.size, resulting in integers
|
| 584 |
+
expected["All"] = expected["All"].astype("int64")
|
| 585 |
+
tm.assert_frame_equal(result, expected)
|
| 586 |
+
|
| 587 |
+
def test_crosstab_duplicate_names(self):
|
| 588 |
+
# GH 13279 / 22529
|
| 589 |
+
|
| 590 |
+
s1 = Series(range(3), name="foo")
|
| 591 |
+
s2_foo = Series(range(1, 4), name="foo")
|
| 592 |
+
s2_bar = Series(range(1, 4), name="bar")
|
| 593 |
+
s3 = Series(range(3), name="waldo")
|
| 594 |
+
|
| 595 |
+
# check result computed with duplicate labels against
|
| 596 |
+
# result computed with unique labels, then relabelled
|
| 597 |
+
mapper = {"bar": "foo"}
|
| 598 |
+
|
| 599 |
+
# duplicate row, column labels
|
| 600 |
+
result = crosstab(s1, s2_foo)
|
| 601 |
+
expected = crosstab(s1, s2_bar).rename_axis(columns=mapper, axis=1)
|
| 602 |
+
tm.assert_frame_equal(result, expected)
|
| 603 |
+
|
| 604 |
+
# duplicate row, unique column labels
|
| 605 |
+
result = crosstab([s1, s2_foo], s3)
|
| 606 |
+
expected = crosstab([s1, s2_bar], s3).rename_axis(index=mapper, axis=0)
|
| 607 |
+
tm.assert_frame_equal(result, expected)
|
| 608 |
+
|
| 609 |
+
# unique row, duplicate column labels
|
| 610 |
+
result = crosstab(s3, [s1, s2_foo])
|
| 611 |
+
expected = crosstab(s3, [s1, s2_bar]).rename_axis(columns=mapper, axis=1)
|
| 612 |
+
|
| 613 |
+
tm.assert_frame_equal(result, expected)
|
| 614 |
+
|
| 615 |
+
@pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
|
| 616 |
+
def test_crosstab_tuple_name(self, names):
|
| 617 |
+
s1 = Series(range(3), name=names[0])
|
| 618 |
+
s2 = Series(range(1, 4), name=names[1])
|
| 619 |
+
|
| 620 |
+
mi = MultiIndex.from_arrays([range(3), range(1, 4)], names=names)
|
| 621 |
+
expected = Series(1, index=mi).unstack(1, fill_value=0)
|
| 622 |
+
|
| 623 |
+
result = crosstab(s1, s2)
|
| 624 |
+
tm.assert_frame_equal(result, expected)
|
| 625 |
+
|
| 626 |
+
def test_crosstab_both_tuple_names(self):
|
| 627 |
+
# GH 18321
|
| 628 |
+
s1 = Series(range(3), name=("a", "b"))
|
| 629 |
+
s2 = Series(range(3), name=("c", "d"))
|
| 630 |
+
|
| 631 |
+
expected = DataFrame(
|
| 632 |
+
np.eye(3, dtype="int64"),
|
| 633 |
+
index=Index(range(3), name=("a", "b")),
|
| 634 |
+
columns=Index(range(3), name=("c", "d")),
|
| 635 |
+
)
|
| 636 |
+
result = crosstab(s1, s2)
|
| 637 |
+
tm.assert_frame_equal(result, expected)
|
| 638 |
+
|
| 639 |
+
def test_crosstab_unsorted_order(self):
|
| 640 |
+
df = DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"])
|
| 641 |
+
result = crosstab(df.index, [df.b, df.a])
|
| 642 |
+
e_idx = Index(["A", "B", "C"], name="row_0")
|
| 643 |
+
e_columns = MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], names=["b", "a"])
|
| 644 |
+
expected = DataFrame(
|
| 645 |
+
[[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns
|
| 646 |
+
)
|
| 647 |
+
tm.assert_frame_equal(result, expected)
|
| 648 |
+
|
| 649 |
+
def test_crosstab_normalize_multiple_columns(self):
|
| 650 |
+
# GH 15150
|
| 651 |
+
df = DataFrame(
|
| 652 |
+
{
|
| 653 |
+
"A": ["one", "one", "two", "three"] * 6,
|
| 654 |
+
"B": ["A", "B", "C"] * 8,
|
| 655 |
+
"C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
|
| 656 |
+
"D": [0] * 24,
|
| 657 |
+
"E": [0] * 24,
|
| 658 |
+
}
|
| 659 |
+
)
|
| 660 |
+
|
| 661 |
+
msg = "using DataFrameGroupBy.sum"
|
| 662 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 663 |
+
result = crosstab(
|
| 664 |
+
[df.A, df.B],
|
| 665 |
+
df.C,
|
| 666 |
+
values=df.D,
|
| 667 |
+
aggfunc=np.sum,
|
| 668 |
+
normalize=True,
|
| 669 |
+
margins=True,
|
| 670 |
+
)
|
| 671 |
+
expected = DataFrame(
|
| 672 |
+
np.array([0] * 29 + [1], dtype=float).reshape(10, 3),
|
| 673 |
+
columns=Index(["bar", "foo", "All"], name="C"),
|
| 674 |
+
index=MultiIndex.from_tuples(
|
| 675 |
+
[
|
| 676 |
+
("one", "A"),
|
| 677 |
+
("one", "B"),
|
| 678 |
+
("one", "C"),
|
| 679 |
+
("three", "A"),
|
| 680 |
+
("three", "B"),
|
| 681 |
+
("three", "C"),
|
| 682 |
+
("two", "A"),
|
| 683 |
+
("two", "B"),
|
| 684 |
+
("two", "C"),
|
| 685 |
+
("All", ""),
|
| 686 |
+
],
|
| 687 |
+
names=["A", "B"],
|
| 688 |
+
),
|
| 689 |
+
)
|
| 690 |
+
tm.assert_frame_equal(result, expected)
|
| 691 |
+
|
| 692 |
+
def test_margin_normalize(self):
|
| 693 |
+
# GH 27500
|
| 694 |
+
df = DataFrame(
|
| 695 |
+
{
|
| 696 |
+
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
|
| 697 |
+
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
|
| 698 |
+
"C": [
|
| 699 |
+
"small",
|
| 700 |
+
"large",
|
| 701 |
+
"large",
|
| 702 |
+
"small",
|
| 703 |
+
"small",
|
| 704 |
+
"large",
|
| 705 |
+
"small",
|
| 706 |
+
"small",
|
| 707 |
+
"large",
|
| 708 |
+
],
|
| 709 |
+
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
|
| 710 |
+
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
|
| 711 |
+
}
|
| 712 |
+
)
|
| 713 |
+
# normalize on index
|
| 714 |
+
result = crosstab(
|
| 715 |
+
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0
|
| 716 |
+
)
|
| 717 |
+
expected = DataFrame(
|
| 718 |
+
[[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]
|
| 719 |
+
)
|
| 720 |
+
expected.index = MultiIndex(
|
| 721 |
+
levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
|
| 722 |
+
codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
|
| 723 |
+
names=["A", "B"],
|
| 724 |
+
)
|
| 725 |
+
expected.columns = Index(["large", "small"], name="C")
|
| 726 |
+
tm.assert_frame_equal(result, expected)
|
| 727 |
+
|
| 728 |
+
# normalize on columns
|
| 729 |
+
result = crosstab(
|
| 730 |
+
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1
|
| 731 |
+
)
|
| 732 |
+
expected = DataFrame(
|
| 733 |
+
[
|
| 734 |
+
[0.25, 0.2, 0.222222],
|
| 735 |
+
[0.25, 0.2, 0.222222],
|
| 736 |
+
[0.5, 0.2, 0.333333],
|
| 737 |
+
[0, 0.4, 0.222222],
|
| 738 |
+
]
|
| 739 |
+
)
|
| 740 |
+
expected.columns = Index(["large", "small", "Sub-Total"], name="C")
|
| 741 |
+
expected.index = MultiIndex(
|
| 742 |
+
levels=[["bar", "foo"], ["one", "two"]],
|
| 743 |
+
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
|
| 744 |
+
names=["A", "B"],
|
| 745 |
+
)
|
| 746 |
+
tm.assert_frame_equal(result, expected)
|
| 747 |
+
|
| 748 |
+
# normalize on both index and column
|
| 749 |
+
result = crosstab(
|
| 750 |
+
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True
|
| 751 |
+
)
|
| 752 |
+
expected = DataFrame(
|
| 753 |
+
[
|
| 754 |
+
[0.111111, 0.111111, 0.222222],
|
| 755 |
+
[0.111111, 0.111111, 0.222222],
|
| 756 |
+
[0.222222, 0.111111, 0.333333],
|
| 757 |
+
[0.000000, 0.222222, 0.222222],
|
| 758 |
+
[0.444444, 0.555555, 1],
|
| 759 |
+
]
|
| 760 |
+
)
|
| 761 |
+
expected.columns = Index(["large", "small", "Sub-Total"], name="C")
|
| 762 |
+
expected.index = MultiIndex(
|
| 763 |
+
levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
|
| 764 |
+
codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
|
| 765 |
+
names=["A", "B"],
|
| 766 |
+
)
|
| 767 |
+
tm.assert_frame_equal(result, expected)
|
| 768 |
+
|
| 769 |
+
def test_margin_normalize_multiple_columns(self):
|
| 770 |
+
# GH 35144
|
| 771 |
+
# use multiple columns with margins and normalization
|
| 772 |
+
df = DataFrame(
|
| 773 |
+
{
|
| 774 |
+
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
|
| 775 |
+
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
|
| 776 |
+
"C": [
|
| 777 |
+
"small",
|
| 778 |
+
"large",
|
| 779 |
+
"large",
|
| 780 |
+
"small",
|
| 781 |
+
"small",
|
| 782 |
+
"large",
|
| 783 |
+
"small",
|
| 784 |
+
"small",
|
| 785 |
+
"large",
|
| 786 |
+
],
|
| 787 |
+
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
|
| 788 |
+
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
|
| 789 |
+
}
|
| 790 |
+
)
|
| 791 |
+
result = crosstab(
|
| 792 |
+
index=df.C,
|
| 793 |
+
columns=[df.A, df.B],
|
| 794 |
+
margins=True,
|
| 795 |
+
margins_name="margin",
|
| 796 |
+
normalize=True,
|
| 797 |
+
)
|
| 798 |
+
expected = DataFrame(
|
| 799 |
+
[
|
| 800 |
+
[0.111111, 0.111111, 0.222222, 0.000000, 0.444444],
|
| 801 |
+
[0.111111, 0.111111, 0.111111, 0.222222, 0.555556],
|
| 802 |
+
[0.222222, 0.222222, 0.333333, 0.222222, 1.0],
|
| 803 |
+
],
|
| 804 |
+
index=["large", "small", "margin"],
|
| 805 |
+
)
|
| 806 |
+
expected.columns = MultiIndex(
|
| 807 |
+
levels=[["bar", "foo", "margin"], ["", "one", "two"]],
|
| 808 |
+
codes=[[0, 0, 1, 1, 2], [1, 2, 1, 2, 0]],
|
| 809 |
+
names=["A", "B"],
|
| 810 |
+
)
|
| 811 |
+
expected.index.name = "C"
|
| 812 |
+
tm.assert_frame_equal(result, expected)
|
| 813 |
+
|
| 814 |
+
def test_margin_support_Float(self):
|
| 815 |
+
# GH 50313
|
| 816 |
+
# use Float64 formats and function aggfunc with margins
|
| 817 |
+
df = DataFrame(
|
| 818 |
+
{"A": [1, 2, 2, 1], "B": [3, 3, 4, 5], "C": [-1.0, 10.0, 1.0, 10.0]},
|
| 819 |
+
dtype="Float64",
|
| 820 |
+
)
|
| 821 |
+
result = crosstab(
|
| 822 |
+
df["A"],
|
| 823 |
+
df["B"],
|
| 824 |
+
values=df["C"],
|
| 825 |
+
aggfunc="sum",
|
| 826 |
+
margins=True,
|
| 827 |
+
)
|
| 828 |
+
expected = DataFrame(
|
| 829 |
+
[
|
| 830 |
+
[-1.0, pd.NA, 10.0, 9.0],
|
| 831 |
+
[10.0, 1.0, pd.NA, 11.0],
|
| 832 |
+
[9.0, 1.0, 10.0, 20.0],
|
| 833 |
+
],
|
| 834 |
+
index=Index([1.0, 2.0, "All"], dtype="object", name="A"),
|
| 835 |
+
columns=Index([3.0, 4.0, 5.0, "All"], dtype="object", name="B"),
|
| 836 |
+
dtype="Float64",
|
| 837 |
+
)
|
| 838 |
+
tm.assert_frame_equal(result, expected)
|
| 839 |
+
|
| 840 |
+
def test_margin_with_ordered_categorical_column(self):
|
| 841 |
+
# GH 25278
|
| 842 |
+
df = DataFrame(
|
| 843 |
+
{
|
| 844 |
+
"First": ["B", "B", "C", "A", "B", "C"],
|
| 845 |
+
"Second": ["C", "B", "B", "B", "C", "A"],
|
| 846 |
+
}
|
| 847 |
+
)
|
| 848 |
+
df["First"] = df["First"].astype(CategoricalDtype(ordered=True))
|
| 849 |
+
customized_categories_order = ["C", "A", "B"]
|
| 850 |
+
df["First"] = df["First"].cat.reorder_categories(customized_categories_order)
|
| 851 |
+
result = crosstab(df["First"], df["Second"], margins=True)
|
| 852 |
+
|
| 853 |
+
expected_index = Index(["C", "A", "B", "All"], name="First")
|
| 854 |
+
expected_columns = Index(["A", "B", "C", "All"], name="Second")
|
| 855 |
+
expected_data = [[1, 1, 0, 2], [0, 1, 0, 1], [0, 1, 2, 3], [1, 3, 2, 6]]
|
| 856 |
+
expected = DataFrame(
|
| 857 |
+
expected_data, index=expected_index, columns=expected_columns
|
| 858 |
+
)
|
| 859 |
+
tm.assert_frame_equal(result, expected)
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
@pytest.mark.parametrize("a_dtype", ["category", "int64"])
|
| 863 |
+
@pytest.mark.parametrize("b_dtype", ["category", "int64"])
|
| 864 |
+
def test_categoricals(a_dtype, b_dtype):
|
| 865 |
+
# https://github.com/pandas-dev/pandas/issues/37465
|
| 866 |
+
g = np.random.default_rng(2)
|
| 867 |
+
a = Series(g.integers(0, 3, size=100)).astype(a_dtype)
|
| 868 |
+
b = Series(g.integers(0, 2, size=100)).astype(b_dtype)
|
| 869 |
+
result = crosstab(a, b, margins=True, dropna=False)
|
| 870 |
+
columns = Index([0, 1, "All"], dtype="object", name="col_0")
|
| 871 |
+
index = Index([0, 1, 2, "All"], dtype="object", name="row_0")
|
| 872 |
+
values = [[10, 18, 28], [23, 16, 39], [17, 16, 33], [50, 50, 100]]
|
| 873 |
+
expected = DataFrame(values, index, columns)
|
| 874 |
+
tm.assert_frame_equal(result, expected)
|
| 875 |
+
|
| 876 |
+
# Verify when categorical does not have all values present
|
| 877 |
+
a.loc[a == 1] = 2
|
| 878 |
+
a_is_cat = isinstance(a.dtype, CategoricalDtype)
|
| 879 |
+
assert not a_is_cat or a.value_counts().loc[1] == 0
|
| 880 |
+
result = crosstab(a, b, margins=True, dropna=False)
|
| 881 |
+
values = [[10, 18, 28], [0, 0, 0], [40, 32, 72], [50, 50, 100]]
|
| 882 |
+
expected = DataFrame(values, index, columns)
|
| 883 |
+
if not a_is_cat:
|
| 884 |
+
expected = expected.loc[[0, 2, "All"]]
|
| 885 |
+
expected["All"] = expected["All"].astype("int64")
|
| 886 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py
ADDED
|
@@ -0,0 +1,791 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
Categorical,
|
| 7 |
+
DataFrame,
|
| 8 |
+
DatetimeIndex,
|
| 9 |
+
Index,
|
| 10 |
+
Interval,
|
| 11 |
+
IntervalIndex,
|
| 12 |
+
Series,
|
| 13 |
+
TimedeltaIndex,
|
| 14 |
+
Timestamp,
|
| 15 |
+
cut,
|
| 16 |
+
date_range,
|
| 17 |
+
interval_range,
|
| 18 |
+
isna,
|
| 19 |
+
qcut,
|
| 20 |
+
timedelta_range,
|
| 21 |
+
to_datetime,
|
| 22 |
+
)
|
| 23 |
+
import pandas._testing as tm
|
| 24 |
+
from pandas.api.types import CategoricalDtype
|
| 25 |
+
import pandas.core.reshape.tile as tmod
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def test_simple():
|
| 29 |
+
data = np.ones(5, dtype="int64")
|
| 30 |
+
result = cut(data, 4, labels=False)
|
| 31 |
+
|
| 32 |
+
expected = np.array([1, 1, 1, 1, 1])
|
| 33 |
+
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@pytest.mark.parametrize("func", [list, np.array])
|
| 37 |
+
def test_bins(func):
|
| 38 |
+
data = func([0.2, 1.4, 2.5, 6.2, 9.7, 2.1])
|
| 39 |
+
result, bins = cut(data, 3, retbins=True)
|
| 40 |
+
|
| 41 |
+
intervals = IntervalIndex.from_breaks(bins.round(3))
|
| 42 |
+
intervals = intervals.take([0, 0, 0, 1, 2, 0])
|
| 43 |
+
expected = Categorical(intervals, ordered=True)
|
| 44 |
+
|
| 45 |
+
tm.assert_categorical_equal(result, expected)
|
| 46 |
+
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7]))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def test_right():
|
| 50 |
+
data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
|
| 51 |
+
result, bins = cut(data, 4, right=True, retbins=True)
|
| 52 |
+
|
| 53 |
+
intervals = IntervalIndex.from_breaks(bins.round(3))
|
| 54 |
+
expected = Categorical(intervals, ordered=True)
|
| 55 |
+
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
|
| 56 |
+
|
| 57 |
+
tm.assert_categorical_equal(result, expected)
|
| 58 |
+
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7]))
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def test_no_right():
|
| 62 |
+
data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
|
| 63 |
+
result, bins = cut(data, 4, right=False, retbins=True)
|
| 64 |
+
|
| 65 |
+
intervals = IntervalIndex.from_breaks(bins.round(3), closed="left")
|
| 66 |
+
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
|
| 67 |
+
expected = Categorical(intervals, ordered=True)
|
| 68 |
+
|
| 69 |
+
tm.assert_categorical_equal(result, expected)
|
| 70 |
+
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def test_bins_from_interval_index():
|
| 74 |
+
c = cut(range(5), 3)
|
| 75 |
+
expected = c
|
| 76 |
+
result = cut(range(5), bins=expected.categories)
|
| 77 |
+
tm.assert_categorical_equal(result, expected)
|
| 78 |
+
|
| 79 |
+
expected = Categorical.from_codes(
|
| 80 |
+
np.append(c.codes, -1), categories=c.categories, ordered=True
|
| 81 |
+
)
|
| 82 |
+
result = cut(range(6), bins=expected.categories)
|
| 83 |
+
tm.assert_categorical_equal(result, expected)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def test_bins_from_interval_index_doc_example():
|
| 87 |
+
# Make sure we preserve the bins.
|
| 88 |
+
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
|
| 89 |
+
c = cut(ages, bins=[0, 18, 35, 70])
|
| 90 |
+
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
|
| 91 |
+
tm.assert_index_equal(c.categories, expected)
|
| 92 |
+
|
| 93 |
+
result = cut([25, 20, 50], bins=c.categories)
|
| 94 |
+
tm.assert_index_equal(result.categories, expected)
|
| 95 |
+
tm.assert_numpy_array_equal(result.codes, np.array([1, 1, 2], dtype="int8"))
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def test_bins_not_overlapping_from_interval_index():
|
| 99 |
+
# see gh-23980
|
| 100 |
+
msg = "Overlapping IntervalIndex is not accepted"
|
| 101 |
+
ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)])
|
| 102 |
+
|
| 103 |
+
with pytest.raises(ValueError, match=msg):
|
| 104 |
+
cut([5, 6], bins=ii)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def test_bins_not_monotonic():
|
| 108 |
+
msg = "bins must increase monotonically"
|
| 109 |
+
data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1]
|
| 110 |
+
|
| 111 |
+
with pytest.raises(ValueError, match=msg):
|
| 112 |
+
cut(data, [0.1, 1.5, 1, 10])
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@pytest.mark.parametrize(
|
| 116 |
+
"x, bins, expected",
|
| 117 |
+
[
|
| 118 |
+
(
|
| 119 |
+
date_range("2017-12-31", periods=3),
|
| 120 |
+
[Timestamp.min, Timestamp("2018-01-01"), Timestamp.max],
|
| 121 |
+
IntervalIndex.from_tuples(
|
| 122 |
+
[
|
| 123 |
+
(Timestamp.min, Timestamp("2018-01-01")),
|
| 124 |
+
(Timestamp("2018-01-01"), Timestamp.max),
|
| 125 |
+
]
|
| 126 |
+
),
|
| 127 |
+
),
|
| 128 |
+
(
|
| 129 |
+
[-1, 0, 1],
|
| 130 |
+
np.array(
|
| 131 |
+
[np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64"
|
| 132 |
+
),
|
| 133 |
+
IntervalIndex.from_tuples(
|
| 134 |
+
[(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)]
|
| 135 |
+
),
|
| 136 |
+
),
|
| 137 |
+
(
|
| 138 |
+
[
|
| 139 |
+
np.timedelta64(-1, "ns"),
|
| 140 |
+
np.timedelta64(0, "ns"),
|
| 141 |
+
np.timedelta64(1, "ns"),
|
| 142 |
+
],
|
| 143 |
+
np.array(
|
| 144 |
+
[
|
| 145 |
+
np.timedelta64(-np.iinfo(np.int64).max, "ns"),
|
| 146 |
+
np.timedelta64(0, "ns"),
|
| 147 |
+
np.timedelta64(np.iinfo(np.int64).max, "ns"),
|
| 148 |
+
]
|
| 149 |
+
),
|
| 150 |
+
IntervalIndex.from_tuples(
|
| 151 |
+
[
|
| 152 |
+
(
|
| 153 |
+
np.timedelta64(-np.iinfo(np.int64).max, "ns"),
|
| 154 |
+
np.timedelta64(0, "ns"),
|
| 155 |
+
),
|
| 156 |
+
(
|
| 157 |
+
np.timedelta64(0, "ns"),
|
| 158 |
+
np.timedelta64(np.iinfo(np.int64).max, "ns"),
|
| 159 |
+
),
|
| 160 |
+
]
|
| 161 |
+
),
|
| 162 |
+
),
|
| 163 |
+
],
|
| 164 |
+
)
|
| 165 |
+
def test_bins_monotonic_not_overflowing(x, bins, expected):
|
| 166 |
+
# GH 26045
|
| 167 |
+
result = cut(x, bins)
|
| 168 |
+
tm.assert_index_equal(result.categories, expected)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def test_wrong_num_labels():
|
| 172 |
+
msg = "Bin labels must be one fewer than the number of bin edges"
|
| 173 |
+
data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1]
|
| 174 |
+
|
| 175 |
+
with pytest.raises(ValueError, match=msg):
|
| 176 |
+
cut(data, [0, 1, 10], labels=["foo", "bar", "baz"])
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
@pytest.mark.parametrize(
|
| 180 |
+
"x,bins,msg",
|
| 181 |
+
[
|
| 182 |
+
([], 2, "Cannot cut empty array"),
|
| 183 |
+
([1, 2, 3], 0.5, "`bins` should be a positive integer"),
|
| 184 |
+
],
|
| 185 |
+
)
|
| 186 |
+
def test_cut_corner(x, bins, msg):
|
| 187 |
+
with pytest.raises(ValueError, match=msg):
|
| 188 |
+
cut(x, bins)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))])
|
| 192 |
+
@pytest.mark.parametrize("cut_func", [cut, qcut])
|
| 193 |
+
def test_cut_not_1d_arg(arg, cut_func):
|
| 194 |
+
msg = "Input array must be 1 dimensional"
|
| 195 |
+
with pytest.raises(ValueError, match=msg):
|
| 196 |
+
cut_func(arg, 2)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@pytest.mark.parametrize(
|
| 200 |
+
"data",
|
| 201 |
+
[
|
| 202 |
+
[0, 1, 2, 3, 4, np.inf],
|
| 203 |
+
[-np.inf, 0, 1, 2, 3, 4],
|
| 204 |
+
[-np.inf, 0, 1, 2, 3, 4, np.inf],
|
| 205 |
+
],
|
| 206 |
+
)
|
| 207 |
+
def test_int_bins_with_inf(data):
|
| 208 |
+
# GH 24314
|
| 209 |
+
msg = "cannot specify integer `bins` when input data contains infinity"
|
| 210 |
+
with pytest.raises(ValueError, match=msg):
|
| 211 |
+
cut(data, bins=3)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def test_cut_out_of_range_more():
|
| 215 |
+
# see gh-1511
|
| 216 |
+
name = "x"
|
| 217 |
+
|
| 218 |
+
ser = Series([0, -1, 0, 1, -3], name=name)
|
| 219 |
+
ind = cut(ser, [0, 1], labels=False)
|
| 220 |
+
|
| 221 |
+
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name)
|
| 222 |
+
tm.assert_series_equal(ind, exp)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@pytest.mark.parametrize(
|
| 226 |
+
"right,breaks,closed",
|
| 227 |
+
[
|
| 228 |
+
(True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"),
|
| 229 |
+
(False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left"),
|
| 230 |
+
],
|
| 231 |
+
)
|
| 232 |
+
def test_labels(right, breaks, closed):
|
| 233 |
+
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
|
| 234 |
+
|
| 235 |
+
result, bins = cut(arr, 4, retbins=True, right=right)
|
| 236 |
+
ex_levels = IntervalIndex.from_breaks(breaks, closed=closed)
|
| 237 |
+
tm.assert_index_equal(result.categories, ex_levels)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def test_cut_pass_series_name_to_factor():
|
| 241 |
+
name = "foo"
|
| 242 |
+
ser = Series(np.random.default_rng(2).standard_normal(100), name=name)
|
| 243 |
+
|
| 244 |
+
factor = cut(ser, 4)
|
| 245 |
+
assert factor.name == name
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def test_label_precision():
|
| 249 |
+
arr = np.arange(0, 0.73, 0.01)
|
| 250 |
+
result = cut(arr, 4, precision=2)
|
| 251 |
+
|
| 252 |
+
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72])
|
| 253 |
+
tm.assert_index_equal(result.categories, ex_levels)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
@pytest.mark.parametrize("labels", [None, False])
|
| 257 |
+
def test_na_handling(labels):
|
| 258 |
+
arr = np.arange(0, 0.75, 0.01)
|
| 259 |
+
arr[::3] = np.nan
|
| 260 |
+
|
| 261 |
+
result = cut(arr, 4, labels=labels)
|
| 262 |
+
result = np.asarray(result)
|
| 263 |
+
|
| 264 |
+
expected = np.where(isna(arr), np.nan, result)
|
| 265 |
+
tm.assert_almost_equal(result, expected)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def test_inf_handling():
|
| 269 |
+
data = np.arange(6)
|
| 270 |
+
data_ser = Series(data, dtype="int64")
|
| 271 |
+
|
| 272 |
+
bins = [-np.inf, 2, 4, np.inf]
|
| 273 |
+
result = cut(data, bins)
|
| 274 |
+
result_ser = cut(data_ser, bins)
|
| 275 |
+
|
| 276 |
+
ex_uniques = IntervalIndex.from_breaks(bins)
|
| 277 |
+
tm.assert_index_equal(result.categories, ex_uniques)
|
| 278 |
+
|
| 279 |
+
assert result[5] == Interval(4, np.inf)
|
| 280 |
+
assert result[0] == Interval(-np.inf, 2)
|
| 281 |
+
assert result_ser[5] == Interval(4, np.inf)
|
| 282 |
+
assert result_ser[0] == Interval(-np.inf, 2)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def test_cut_out_of_bounds():
|
| 286 |
+
arr = np.random.default_rng(2).standard_normal(100)
|
| 287 |
+
result = cut(arr, [-1, 0, 1])
|
| 288 |
+
|
| 289 |
+
mask = isna(result)
|
| 290 |
+
ex_mask = (arr < -1) | (arr > 1)
|
| 291 |
+
tm.assert_numpy_array_equal(mask, ex_mask)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@pytest.mark.parametrize(
|
| 295 |
+
"get_labels,get_expected",
|
| 296 |
+
[
|
| 297 |
+
(
|
| 298 |
+
lambda labels: labels,
|
| 299 |
+
lambda labels: Categorical(
|
| 300 |
+
["Medium"] + 4 * ["Small"] + ["Medium", "Large"],
|
| 301 |
+
categories=labels,
|
| 302 |
+
ordered=True,
|
| 303 |
+
),
|
| 304 |
+
),
|
| 305 |
+
(
|
| 306 |
+
lambda labels: Categorical.from_codes([0, 1, 2], labels),
|
| 307 |
+
lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels),
|
| 308 |
+
),
|
| 309 |
+
],
|
| 310 |
+
)
|
| 311 |
+
def test_cut_pass_labels(get_labels, get_expected):
|
| 312 |
+
bins = [0, 25, 50, 100]
|
| 313 |
+
arr = [50, 5, 10, 15, 20, 30, 70]
|
| 314 |
+
labels = ["Small", "Medium", "Large"]
|
| 315 |
+
|
| 316 |
+
result = cut(arr, bins, labels=get_labels(labels))
|
| 317 |
+
tm.assert_categorical_equal(result, get_expected(labels))
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def test_cut_pass_labels_compat():
|
| 321 |
+
# see gh-16459
|
| 322 |
+
arr = [50, 5, 10, 15, 20, 30, 70]
|
| 323 |
+
labels = ["Good", "Medium", "Bad"]
|
| 324 |
+
|
| 325 |
+
result = cut(arr, 3, labels=labels)
|
| 326 |
+
exp = cut(arr, 3, labels=Categorical(labels, categories=labels, ordered=True))
|
| 327 |
+
tm.assert_categorical_equal(result, exp)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@pytest.mark.parametrize("x", [np.arange(11.0), np.arange(11.0) / 1e10])
|
| 331 |
+
def test_round_frac_just_works(x):
|
| 332 |
+
# It works.
|
| 333 |
+
cut(x, 2)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
@pytest.mark.parametrize(
|
| 337 |
+
"val,precision,expected",
|
| 338 |
+
[
|
| 339 |
+
(-117.9998, 3, -118),
|
| 340 |
+
(117.9998, 3, 118),
|
| 341 |
+
(117.9998, 2, 118),
|
| 342 |
+
(0.000123456, 2, 0.00012),
|
| 343 |
+
],
|
| 344 |
+
)
|
| 345 |
+
def test_round_frac(val, precision, expected):
|
| 346 |
+
# see gh-1979
|
| 347 |
+
result = tmod._round_frac(val, precision=precision)
|
| 348 |
+
assert result == expected
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def test_cut_return_intervals():
|
| 352 |
+
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
|
| 353 |
+
result = cut(ser, 3)
|
| 354 |
+
|
| 355 |
+
exp_bins = np.linspace(0, 8, num=4).round(3)
|
| 356 |
+
exp_bins[0] -= 0.008
|
| 357 |
+
|
| 358 |
+
expected = Series(
|
| 359 |
+
IntervalIndex.from_breaks(exp_bins, closed="right").take(
|
| 360 |
+
[0, 0, 0, 1, 1, 1, 2, 2, 2]
|
| 361 |
+
)
|
| 362 |
+
).astype(CategoricalDtype(ordered=True))
|
| 363 |
+
tm.assert_series_equal(result, expected)
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def test_series_ret_bins():
|
| 367 |
+
# see gh-8589
|
| 368 |
+
ser = Series(np.arange(4))
|
| 369 |
+
result, bins = cut(ser, 2, retbins=True)
|
| 370 |
+
|
| 371 |
+
expected = Series(
|
| 372 |
+
IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2)
|
| 373 |
+
).astype(CategoricalDtype(ordered=True))
|
| 374 |
+
tm.assert_series_equal(result, expected)
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
@pytest.mark.parametrize(
|
| 378 |
+
"kwargs,msg",
|
| 379 |
+
[
|
| 380 |
+
({"duplicates": "drop"}, None),
|
| 381 |
+
({}, "Bin edges must be unique"),
|
| 382 |
+
({"duplicates": "raise"}, "Bin edges must be unique"),
|
| 383 |
+
({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
|
| 384 |
+
],
|
| 385 |
+
)
|
| 386 |
+
def test_cut_duplicates_bin(kwargs, msg):
|
| 387 |
+
# see gh-20947
|
| 388 |
+
bins = [0, 2, 4, 6, 10, 10]
|
| 389 |
+
values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"])
|
| 390 |
+
|
| 391 |
+
if msg is not None:
|
| 392 |
+
with pytest.raises(ValueError, match=msg):
|
| 393 |
+
cut(values, bins, **kwargs)
|
| 394 |
+
else:
|
| 395 |
+
result = cut(values, bins, **kwargs)
|
| 396 |
+
expected = cut(values, pd.unique(np.asarray(bins)))
|
| 397 |
+
tm.assert_series_equal(result, expected)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
@pytest.mark.parametrize("data", [9.0, -9.0, 0.0])
|
| 401 |
+
@pytest.mark.parametrize("length", [1, 2])
|
| 402 |
+
def test_single_bin(data, length):
|
| 403 |
+
# see gh-14652, gh-15428
|
| 404 |
+
ser = Series([data] * length)
|
| 405 |
+
result = cut(ser, 1, labels=False)
|
| 406 |
+
|
| 407 |
+
expected = Series([0] * length, dtype=np.intp)
|
| 408 |
+
tm.assert_series_equal(result, expected)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
@pytest.mark.parametrize(
|
| 412 |
+
"array_1_writeable,array_2_writeable", [(True, True), (True, False), (False, False)]
|
| 413 |
+
)
|
| 414 |
+
def test_cut_read_only(array_1_writeable, array_2_writeable):
|
| 415 |
+
# issue 18773
|
| 416 |
+
array_1 = np.arange(0, 100, 10)
|
| 417 |
+
array_1.flags.writeable = array_1_writeable
|
| 418 |
+
|
| 419 |
+
array_2 = np.arange(0, 100, 10)
|
| 420 |
+
array_2.flags.writeable = array_2_writeable
|
| 421 |
+
|
| 422 |
+
hundred_elements = np.arange(100)
|
| 423 |
+
tm.assert_categorical_equal(
|
| 424 |
+
cut(hundred_elements, array_1), cut(hundred_elements, array_2)
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
@pytest.mark.parametrize(
|
| 429 |
+
"conv",
|
| 430 |
+
[
|
| 431 |
+
lambda v: Timestamp(v),
|
| 432 |
+
lambda v: to_datetime(v),
|
| 433 |
+
lambda v: np.datetime64(v),
|
| 434 |
+
lambda v: Timestamp(v).to_pydatetime(),
|
| 435 |
+
],
|
| 436 |
+
)
|
| 437 |
+
def test_datetime_bin(conv):
|
| 438 |
+
data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")]
|
| 439 |
+
bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"]
|
| 440 |
+
|
| 441 |
+
expected = Series(
|
| 442 |
+
IntervalIndex(
|
| 443 |
+
[
|
| 444 |
+
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
|
| 445 |
+
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])),
|
| 446 |
+
]
|
| 447 |
+
)
|
| 448 |
+
).astype(CategoricalDtype(ordered=True))
|
| 449 |
+
|
| 450 |
+
bins = [conv(v) for v in bin_data]
|
| 451 |
+
result = Series(cut(data, bins=bins))
|
| 452 |
+
tm.assert_series_equal(result, expected)
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
@pytest.mark.parametrize("box", [Series, Index, np.array, list])
|
| 456 |
+
def test_datetime_cut(unit, box):
|
| 457 |
+
# see gh-14714
|
| 458 |
+
#
|
| 459 |
+
# Testing time data when it comes in various collection types.
|
| 460 |
+
data = to_datetime(["2013-01-01", "2013-01-02", "2013-01-03"]).astype(f"M8[{unit}]")
|
| 461 |
+
data = box(data)
|
| 462 |
+
result, _ = cut(data, 3, retbins=True)
|
| 463 |
+
|
| 464 |
+
if box is list:
|
| 465 |
+
# We don't (yet) do inference on these, so get nanos
|
| 466 |
+
unit = "ns"
|
| 467 |
+
|
| 468 |
+
if unit == "s":
|
| 469 |
+
# See https://github.com/pandas-dev/pandas/pull/56101#discussion_r1405325425
|
| 470 |
+
# for why we round to 8 seconds instead of 7
|
| 471 |
+
left = DatetimeIndex(
|
| 472 |
+
["2012-12-31 23:57:08", "2013-01-01 16:00:00", "2013-01-02 08:00:00"],
|
| 473 |
+
dtype=f"M8[{unit}]",
|
| 474 |
+
)
|
| 475 |
+
else:
|
| 476 |
+
left = DatetimeIndex(
|
| 477 |
+
[
|
| 478 |
+
"2012-12-31 23:57:07.200000",
|
| 479 |
+
"2013-01-01 16:00:00",
|
| 480 |
+
"2013-01-02 08:00:00",
|
| 481 |
+
],
|
| 482 |
+
dtype=f"M8[{unit}]",
|
| 483 |
+
)
|
| 484 |
+
right = DatetimeIndex(
|
| 485 |
+
["2013-01-01 16:00:00", "2013-01-02 08:00:00", "2013-01-03 00:00:00"],
|
| 486 |
+
dtype=f"M8[{unit}]",
|
| 487 |
+
)
|
| 488 |
+
|
| 489 |
+
exp_intervals = IntervalIndex.from_arrays(left, right)
|
| 490 |
+
expected = Series(exp_intervals).astype(CategoricalDtype(ordered=True))
|
| 491 |
+
tm.assert_series_equal(Series(result), expected)
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
@pytest.mark.parametrize("box", [list, np.array, Index, Series])
|
| 495 |
+
def test_datetime_tz_cut_mismatched_tzawareness(box):
|
| 496 |
+
# GH#54964
|
| 497 |
+
bins = box(
|
| 498 |
+
[
|
| 499 |
+
Timestamp("2013-01-01 04:57:07.200000"),
|
| 500 |
+
Timestamp("2013-01-01 21:00:00"),
|
| 501 |
+
Timestamp("2013-01-02 13:00:00"),
|
| 502 |
+
Timestamp("2013-01-03 05:00:00"),
|
| 503 |
+
]
|
| 504 |
+
)
|
| 505 |
+
ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))
|
| 506 |
+
|
| 507 |
+
msg = "Cannot use timezone-naive bins with timezone-aware values"
|
| 508 |
+
with pytest.raises(ValueError, match=msg):
|
| 509 |
+
cut(ser, bins)
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
@pytest.mark.parametrize(
|
| 513 |
+
"bins",
|
| 514 |
+
[
|
| 515 |
+
3,
|
| 516 |
+
[
|
| 517 |
+
Timestamp("2013-01-01 04:57:07.200000", tz="UTC").tz_convert("US/Eastern"),
|
| 518 |
+
Timestamp("2013-01-01 21:00:00", tz="UTC").tz_convert("US/Eastern"),
|
| 519 |
+
Timestamp("2013-01-02 13:00:00", tz="UTC").tz_convert("US/Eastern"),
|
| 520 |
+
Timestamp("2013-01-03 05:00:00", tz="UTC").tz_convert("US/Eastern"),
|
| 521 |
+
],
|
| 522 |
+
],
|
| 523 |
+
)
|
| 524 |
+
@pytest.mark.parametrize("box", [list, np.array, Index, Series])
|
| 525 |
+
def test_datetime_tz_cut(bins, box):
|
| 526 |
+
# see gh-19872
|
| 527 |
+
tz = "US/Eastern"
|
| 528 |
+
ser = Series(date_range("20130101", periods=3, tz=tz))
|
| 529 |
+
|
| 530 |
+
if not isinstance(bins, int):
|
| 531 |
+
bins = box(bins)
|
| 532 |
+
|
| 533 |
+
result = cut(ser, bins)
|
| 534 |
+
expected = Series(
|
| 535 |
+
IntervalIndex(
|
| 536 |
+
[
|
| 537 |
+
Interval(
|
| 538 |
+
Timestamp("2012-12-31 23:57:07.200000", tz=tz),
|
| 539 |
+
Timestamp("2013-01-01 16:00:00", tz=tz),
|
| 540 |
+
),
|
| 541 |
+
Interval(
|
| 542 |
+
Timestamp("2013-01-01 16:00:00", tz=tz),
|
| 543 |
+
Timestamp("2013-01-02 08:00:00", tz=tz),
|
| 544 |
+
),
|
| 545 |
+
Interval(
|
| 546 |
+
Timestamp("2013-01-02 08:00:00", tz=tz),
|
| 547 |
+
Timestamp("2013-01-03 00:00:00", tz=tz),
|
| 548 |
+
),
|
| 549 |
+
]
|
| 550 |
+
)
|
| 551 |
+
).astype(CategoricalDtype(ordered=True))
|
| 552 |
+
tm.assert_series_equal(result, expected)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
def test_datetime_nan_error():
|
| 556 |
+
msg = "bins must be of datetime64 dtype"
|
| 557 |
+
|
| 558 |
+
with pytest.raises(ValueError, match=msg):
|
| 559 |
+
cut(date_range("20130101", periods=3), bins=[0, 2, 4])
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def test_datetime_nan_mask():
|
| 563 |
+
result = cut(
|
| 564 |
+
date_range("20130102", periods=5), bins=date_range("20130101", periods=2)
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
mask = result.categories.isna()
|
| 568 |
+
tm.assert_numpy_array_equal(mask, np.array([False]))
|
| 569 |
+
|
| 570 |
+
mask = result.isna()
|
| 571 |
+
tm.assert_numpy_array_equal(mask, np.array([False, True, True, True, True]))
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])
|
| 575 |
+
def test_datetime_cut_roundtrip(tz, unit):
|
| 576 |
+
# see gh-19891
|
| 577 |
+
ser = Series(date_range("20180101", periods=3, tz=tz, unit=unit))
|
| 578 |
+
result, result_bins = cut(ser, 2, retbins=True)
|
| 579 |
+
|
| 580 |
+
expected = cut(ser, result_bins)
|
| 581 |
+
tm.assert_series_equal(result, expected)
|
| 582 |
+
|
| 583 |
+
if unit == "s":
|
| 584 |
+
# TODO: constructing DatetimeIndex with dtype="M8[s]" without truncating
|
| 585 |
+
# the first entry here raises in array_to_datetime. Should truncate
|
| 586 |
+
# instead of raising?
|
| 587 |
+
# See https://github.com/pandas-dev/pandas/pull/56101#discussion_r1405325425
|
| 588 |
+
# for why we round to 8 seconds instead of 7
|
| 589 |
+
expected_bins = DatetimeIndex(
|
| 590 |
+
["2017-12-31 23:57:08", "2018-01-02 00:00:00", "2018-01-03 00:00:00"],
|
| 591 |
+
dtype=f"M8[{unit}]",
|
| 592 |
+
)
|
| 593 |
+
else:
|
| 594 |
+
expected_bins = DatetimeIndex(
|
| 595 |
+
[
|
| 596 |
+
"2017-12-31 23:57:07.200000",
|
| 597 |
+
"2018-01-02 00:00:00",
|
| 598 |
+
"2018-01-03 00:00:00",
|
| 599 |
+
],
|
| 600 |
+
dtype=f"M8[{unit}]",
|
| 601 |
+
)
|
| 602 |
+
expected_bins = expected_bins.tz_localize(tz)
|
| 603 |
+
tm.assert_index_equal(result_bins, expected_bins)
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
def test_timedelta_cut_roundtrip():
|
| 607 |
+
# see gh-19891
|
| 608 |
+
ser = Series(timedelta_range("1day", periods=3))
|
| 609 |
+
result, result_bins = cut(ser, 2, retbins=True)
|
| 610 |
+
|
| 611 |
+
expected = cut(ser, result_bins)
|
| 612 |
+
tm.assert_series_equal(result, expected)
|
| 613 |
+
|
| 614 |
+
expected_bins = TimedeltaIndex(
|
| 615 |
+
["0 days 23:57:07.200000", "2 days 00:00:00", "3 days 00:00:00"]
|
| 616 |
+
)
|
| 617 |
+
tm.assert_index_equal(result_bins, expected_bins)
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
@pytest.mark.parametrize("bins", [6, 7])
|
| 621 |
+
@pytest.mark.parametrize(
|
| 622 |
+
"box, compare",
|
| 623 |
+
[
|
| 624 |
+
(Series, tm.assert_series_equal),
|
| 625 |
+
(np.array, tm.assert_categorical_equal),
|
| 626 |
+
(list, tm.assert_equal),
|
| 627 |
+
],
|
| 628 |
+
)
|
| 629 |
+
def test_cut_bool_coercion_to_int(bins, box, compare):
|
| 630 |
+
# issue 20303
|
| 631 |
+
data_expected = box([0, 1, 1, 0, 1] * 10)
|
| 632 |
+
data_result = box([False, True, True, False, True] * 10)
|
| 633 |
+
expected = cut(data_expected, bins, duplicates="drop")
|
| 634 |
+
result = cut(data_result, bins, duplicates="drop")
|
| 635 |
+
compare(result, expected)
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
@pytest.mark.parametrize("labels", ["foo", 1, True])
|
| 639 |
+
def test_cut_incorrect_labels(labels):
|
| 640 |
+
# GH 13318
|
| 641 |
+
values = range(5)
|
| 642 |
+
msg = "Bin labels must either be False, None or passed in as a list-like argument"
|
| 643 |
+
with pytest.raises(ValueError, match=msg):
|
| 644 |
+
cut(values, 4, labels=labels)
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
|
| 648 |
+
@pytest.mark.parametrize("right", [True, False])
|
| 649 |
+
@pytest.mark.parametrize("include_lowest", [True, False])
|
| 650 |
+
def test_cut_nullable_integer(bins, right, include_lowest):
|
| 651 |
+
a = np.random.default_rng(2).integers(0, 10, size=50).astype(float)
|
| 652 |
+
a[::2] = np.nan
|
| 653 |
+
result = cut(
|
| 654 |
+
pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
|
| 655 |
+
)
|
| 656 |
+
expected = cut(a, bins, right=right, include_lowest=include_lowest)
|
| 657 |
+
tm.assert_categorical_equal(result, expected)
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
@pytest.mark.parametrize(
|
| 661 |
+
"data, bins, labels, expected_codes, expected_labels",
|
| 662 |
+
[
|
| 663 |
+
([15, 17, 19], [14, 16, 18, 20], ["A", "B", "A"], [0, 1, 0], ["A", "B"]),
|
| 664 |
+
([1, 3, 5], [0, 2, 4, 6, 8], [2, 0, 1, 2], [2, 0, 1], [0, 1, 2]),
|
| 665 |
+
],
|
| 666 |
+
)
|
| 667 |
+
def test_cut_non_unique_labels(data, bins, labels, expected_codes, expected_labels):
|
| 668 |
+
# GH 33141
|
| 669 |
+
result = cut(data, bins=bins, labels=labels, ordered=False)
|
| 670 |
+
expected = Categorical.from_codes(
|
| 671 |
+
expected_codes, categories=expected_labels, ordered=False
|
| 672 |
+
)
|
| 673 |
+
tm.assert_categorical_equal(result, expected)
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
@pytest.mark.parametrize(
|
| 677 |
+
"data, bins, labels, expected_codes, expected_labels",
|
| 678 |
+
[
|
| 679 |
+
([15, 17, 19], [14, 16, 18, 20], ["C", "B", "A"], [0, 1, 2], ["C", "B", "A"]),
|
| 680 |
+
([1, 3, 5], [0, 2, 4, 6, 8], [3, 0, 1, 2], [0, 1, 2], [3, 0, 1, 2]),
|
| 681 |
+
],
|
| 682 |
+
)
|
| 683 |
+
def test_cut_unordered_labels(data, bins, labels, expected_codes, expected_labels):
|
| 684 |
+
# GH 33141
|
| 685 |
+
result = cut(data, bins=bins, labels=labels, ordered=False)
|
| 686 |
+
expected = Categorical.from_codes(
|
| 687 |
+
expected_codes, categories=expected_labels, ordered=False
|
| 688 |
+
)
|
| 689 |
+
tm.assert_categorical_equal(result, expected)
|
| 690 |
+
|
| 691 |
+
|
| 692 |
+
def test_cut_unordered_with_missing_labels_raises_error():
|
| 693 |
+
# GH 33141
|
| 694 |
+
msg = "'labels' must be provided if 'ordered = False'"
|
| 695 |
+
with pytest.raises(ValueError, match=msg):
|
| 696 |
+
cut([0.5, 3], bins=[0, 1, 2], ordered=False)
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
def test_cut_unordered_with_series_labels():
|
| 700 |
+
# https://github.com/pandas-dev/pandas/issues/36603
|
| 701 |
+
ser = Series([1, 2, 3, 4, 5])
|
| 702 |
+
bins = Series([0, 2, 4, 6])
|
| 703 |
+
labels = Series(["a", "b", "c"])
|
| 704 |
+
result = cut(ser, bins=bins, labels=labels, ordered=False)
|
| 705 |
+
expected = Series(["a", "a", "b", "b", "c"], dtype="category")
|
| 706 |
+
tm.assert_series_equal(result, expected)
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
def test_cut_no_warnings():
|
| 710 |
+
df = DataFrame({"value": np.random.default_rng(2).integers(0, 100, 20)})
|
| 711 |
+
labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)]
|
| 712 |
+
with tm.assert_produces_warning(False):
|
| 713 |
+
df["group"] = cut(df.value, range(0, 105, 10), right=False, labels=labels)
|
| 714 |
+
|
| 715 |
+
|
| 716 |
+
def test_cut_with_duplicated_index_lowest_included():
|
| 717 |
+
# GH 42185
|
| 718 |
+
expected = Series(
|
| 719 |
+
[Interval(-0.001, 2, closed="right")] * 3
|
| 720 |
+
+ [Interval(2, 4, closed="right"), Interval(-0.001, 2, closed="right")],
|
| 721 |
+
index=[0, 1, 2, 3, 0],
|
| 722 |
+
dtype="category",
|
| 723 |
+
).cat.as_ordered()
|
| 724 |
+
|
| 725 |
+
ser = Series([0, 1, 2, 3, 0], index=[0, 1, 2, 3, 0])
|
| 726 |
+
result = cut(ser, bins=[0, 2, 4], include_lowest=True)
|
| 727 |
+
tm.assert_series_equal(result, expected)
|
| 728 |
+
|
| 729 |
+
|
| 730 |
+
def test_cut_with_nonexact_categorical_indices():
|
| 731 |
+
# GH 42424
|
| 732 |
+
|
| 733 |
+
ser = Series(range(100))
|
| 734 |
+
ser1 = cut(ser, 10).value_counts().head(5)
|
| 735 |
+
ser2 = cut(ser, 10).value_counts().tail(5)
|
| 736 |
+
result = DataFrame({"1": ser1, "2": ser2})
|
| 737 |
+
|
| 738 |
+
index = pd.CategoricalIndex(
|
| 739 |
+
[
|
| 740 |
+
Interval(-0.099, 9.9, closed="right"),
|
| 741 |
+
Interval(9.9, 19.8, closed="right"),
|
| 742 |
+
Interval(19.8, 29.7, closed="right"),
|
| 743 |
+
Interval(29.7, 39.6, closed="right"),
|
| 744 |
+
Interval(39.6, 49.5, closed="right"),
|
| 745 |
+
Interval(49.5, 59.4, closed="right"),
|
| 746 |
+
Interval(59.4, 69.3, closed="right"),
|
| 747 |
+
Interval(69.3, 79.2, closed="right"),
|
| 748 |
+
Interval(79.2, 89.1, closed="right"),
|
| 749 |
+
Interval(89.1, 99, closed="right"),
|
| 750 |
+
],
|
| 751 |
+
ordered=True,
|
| 752 |
+
)
|
| 753 |
+
|
| 754 |
+
expected = DataFrame(
|
| 755 |
+
{"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index
|
| 756 |
+
)
|
| 757 |
+
|
| 758 |
+
tm.assert_frame_equal(expected, result)
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
def test_cut_with_timestamp_tuple_labels():
|
| 762 |
+
# GH 40661
|
| 763 |
+
labels = [(Timestamp(10),), (Timestamp(20),), (Timestamp(30),)]
|
| 764 |
+
result = cut([2, 4, 6], bins=[1, 3, 5, 7], labels=labels)
|
| 765 |
+
|
| 766 |
+
expected = Categorical.from_codes([0, 1, 2], labels, ordered=True)
|
| 767 |
+
tm.assert_categorical_equal(result, expected)
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
def test_cut_bins_datetime_intervalindex():
|
| 771 |
+
# https://github.com/pandas-dev/pandas/issues/46218
|
| 772 |
+
bins = interval_range(Timestamp("2022-02-25"), Timestamp("2022-02-27"), freq="1D")
|
| 773 |
+
# passing Series instead of list is important to trigger bug
|
| 774 |
+
result = cut(Series([Timestamp("2022-02-26")]).astype("M8[ns]"), bins=bins)
|
| 775 |
+
expected = Categorical.from_codes([0], bins, ordered=True)
|
| 776 |
+
tm.assert_categorical_equal(result.array, expected)
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
def test_cut_with_nullable_int64():
|
| 780 |
+
# GH 30787
|
| 781 |
+
series = Series([0, 1, 2, 3, 4, pd.NA, 6, 7], dtype="Int64")
|
| 782 |
+
bins = [0, 2, 4, 6, 8]
|
| 783 |
+
intervals = IntervalIndex.from_breaks(bins)
|
| 784 |
+
|
| 785 |
+
expected = Series(
|
| 786 |
+
Categorical.from_codes([-1, 0, 0, 1, 1, -1, 2, 3], intervals, ordered=True)
|
| 787 |
+
)
|
| 788 |
+
|
| 789 |
+
result = cut(series, bins=bins)
|
| 790 |
+
|
| 791 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py
ADDED
|
@@ -0,0 +1,743 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import unicodedata
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
import pandas.util._test_decorators as td
|
| 8 |
+
|
| 9 |
+
from pandas.core.dtypes.common import is_integer_dtype
|
| 10 |
+
|
| 11 |
+
import pandas as pd
|
| 12 |
+
from pandas import (
|
| 13 |
+
ArrowDtype,
|
| 14 |
+
Categorical,
|
| 15 |
+
CategoricalDtype,
|
| 16 |
+
CategoricalIndex,
|
| 17 |
+
DataFrame,
|
| 18 |
+
Index,
|
| 19 |
+
RangeIndex,
|
| 20 |
+
Series,
|
| 21 |
+
SparseDtype,
|
| 22 |
+
get_dummies,
|
| 23 |
+
)
|
| 24 |
+
import pandas._testing as tm
|
| 25 |
+
from pandas.core.arrays.sparse import SparseArray
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import pyarrow as pa
|
| 29 |
+
except ImportError:
|
| 30 |
+
pa = None
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TestGetDummies:
|
| 34 |
+
@pytest.fixture
|
| 35 |
+
def df(self):
|
| 36 |
+
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
|
| 37 |
+
|
| 38 |
+
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
|
| 39 |
+
def dtype(self, request):
|
| 40 |
+
return np.dtype(request.param)
|
| 41 |
+
|
| 42 |
+
@pytest.fixture(params=["dense", "sparse"])
|
| 43 |
+
def sparse(self, request):
|
| 44 |
+
# params are strings to simplify reading test results,
|
| 45 |
+
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
|
| 46 |
+
return request.param == "sparse"
|
| 47 |
+
|
| 48 |
+
def effective_dtype(self, dtype):
|
| 49 |
+
if dtype is None:
|
| 50 |
+
return np.uint8
|
| 51 |
+
return dtype
|
| 52 |
+
|
| 53 |
+
def test_get_dummies_raises_on_dtype_object(self, df):
|
| 54 |
+
msg = "dtype=object is not a valid dtype for get_dummies"
|
| 55 |
+
with pytest.raises(ValueError, match=msg):
|
| 56 |
+
get_dummies(df, dtype="object")
|
| 57 |
+
|
| 58 |
+
def test_get_dummies_basic(self, sparse, dtype):
|
| 59 |
+
s_list = list("abc")
|
| 60 |
+
s_series = Series(s_list)
|
| 61 |
+
s_series_index = Series(s_list, list("ABC"))
|
| 62 |
+
|
| 63 |
+
expected = DataFrame(
|
| 64 |
+
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
|
| 65 |
+
dtype=self.effective_dtype(dtype),
|
| 66 |
+
)
|
| 67 |
+
if sparse:
|
| 68 |
+
if dtype.kind == "b":
|
| 69 |
+
expected = expected.apply(SparseArray, fill_value=False)
|
| 70 |
+
else:
|
| 71 |
+
expected = expected.apply(SparseArray, fill_value=0.0)
|
| 72 |
+
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
|
| 73 |
+
tm.assert_frame_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
|
| 76 |
+
tm.assert_frame_equal(result, expected)
|
| 77 |
+
|
| 78 |
+
expected.index = list("ABC")
|
| 79 |
+
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
|
| 80 |
+
tm.assert_frame_equal(result, expected)
|
| 81 |
+
|
| 82 |
+
def test_get_dummies_basic_types(self, sparse, dtype, using_infer_string):
|
| 83 |
+
# GH 10531
|
| 84 |
+
s_list = list("abc")
|
| 85 |
+
s_series = Series(s_list)
|
| 86 |
+
s_df = DataFrame(
|
| 87 |
+
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
expected = DataFrame(
|
| 91 |
+
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
|
| 92 |
+
dtype=self.effective_dtype(dtype),
|
| 93 |
+
columns=list("abc"),
|
| 94 |
+
)
|
| 95 |
+
if sparse:
|
| 96 |
+
if is_integer_dtype(dtype):
|
| 97 |
+
fill_value = 0
|
| 98 |
+
elif dtype == bool:
|
| 99 |
+
fill_value = False
|
| 100 |
+
else:
|
| 101 |
+
fill_value = 0.0
|
| 102 |
+
|
| 103 |
+
expected = expected.apply(SparseArray, fill_value=fill_value)
|
| 104 |
+
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
|
| 105 |
+
tm.assert_frame_equal(result, expected)
|
| 106 |
+
|
| 107 |
+
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
|
| 108 |
+
tm.assert_frame_equal(result, expected)
|
| 109 |
+
|
| 110 |
+
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
|
| 111 |
+
if sparse:
|
| 112 |
+
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
|
| 113 |
+
else:
|
| 114 |
+
dtype_name = self.effective_dtype(dtype).name
|
| 115 |
+
|
| 116 |
+
expected = Series({dtype_name: 8}, name="count")
|
| 117 |
+
result = result.dtypes.value_counts()
|
| 118 |
+
result.index = [str(i) for i in result.index]
|
| 119 |
+
tm.assert_series_equal(result, expected)
|
| 120 |
+
|
| 121 |
+
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
|
| 122 |
+
|
| 123 |
+
key = "string" if using_infer_string else "object"
|
| 124 |
+
expected_counts = {"int64": 1, key: 1}
|
| 125 |
+
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
|
| 126 |
+
|
| 127 |
+
expected = Series(expected_counts, name="count").sort_index()
|
| 128 |
+
result = result.dtypes.value_counts()
|
| 129 |
+
result.index = [str(i) for i in result.index]
|
| 130 |
+
result = result.sort_index()
|
| 131 |
+
tm.assert_series_equal(result, expected)
|
| 132 |
+
|
| 133 |
+
def test_get_dummies_just_na(self, sparse):
|
| 134 |
+
just_na_list = [np.nan]
|
| 135 |
+
just_na_series = Series(just_na_list)
|
| 136 |
+
just_na_series_index = Series(just_na_list, index=["A"])
|
| 137 |
+
|
| 138 |
+
res_list = get_dummies(just_na_list, sparse=sparse)
|
| 139 |
+
res_series = get_dummies(just_na_series, sparse=sparse)
|
| 140 |
+
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
|
| 141 |
+
|
| 142 |
+
assert res_list.empty
|
| 143 |
+
assert res_series.empty
|
| 144 |
+
assert res_series_index.empty
|
| 145 |
+
|
| 146 |
+
assert res_list.index.tolist() == [0]
|
| 147 |
+
assert res_series.index.tolist() == [0]
|
| 148 |
+
assert res_series_index.index.tolist() == ["A"]
|
| 149 |
+
|
| 150 |
+
def test_get_dummies_include_na(self, sparse, dtype):
|
| 151 |
+
s = ["a", "b", np.nan]
|
| 152 |
+
res = get_dummies(s, sparse=sparse, dtype=dtype)
|
| 153 |
+
exp = DataFrame(
|
| 154 |
+
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
|
| 155 |
+
)
|
| 156 |
+
if sparse:
|
| 157 |
+
if dtype.kind == "b":
|
| 158 |
+
exp = exp.apply(SparseArray, fill_value=False)
|
| 159 |
+
else:
|
| 160 |
+
exp = exp.apply(SparseArray, fill_value=0.0)
|
| 161 |
+
tm.assert_frame_equal(res, exp)
|
| 162 |
+
|
| 163 |
+
# Sparse dataframes do not allow nan labelled columns, see #GH8822
|
| 164 |
+
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
|
| 165 |
+
exp_na = DataFrame(
|
| 166 |
+
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
|
| 167 |
+
dtype=self.effective_dtype(dtype),
|
| 168 |
+
)
|
| 169 |
+
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
|
| 170 |
+
# hack (NaN handling in assert_index_equal)
|
| 171 |
+
exp_na.columns = res_na.columns
|
| 172 |
+
if sparse:
|
| 173 |
+
if dtype.kind == "b":
|
| 174 |
+
exp_na = exp_na.apply(SparseArray, fill_value=False)
|
| 175 |
+
else:
|
| 176 |
+
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
|
| 177 |
+
tm.assert_frame_equal(res_na, exp_na)
|
| 178 |
+
|
| 179 |
+
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
|
| 180 |
+
exp_just_na = DataFrame(
|
| 181 |
+
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
|
| 182 |
+
)
|
| 183 |
+
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
|
| 184 |
+
|
| 185 |
+
def test_get_dummies_unicode(self, sparse):
|
| 186 |
+
# See GH 6885 - get_dummies chokes on unicode values
|
| 187 |
+
e = "e"
|
| 188 |
+
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
|
| 189 |
+
s = [e, eacute, eacute]
|
| 190 |
+
res = get_dummies(s, prefix="letter", sparse=sparse)
|
| 191 |
+
exp = DataFrame(
|
| 192 |
+
{"letter_e": [True, False, False], f"letter_{eacute}": [False, True, True]}
|
| 193 |
+
)
|
| 194 |
+
if sparse:
|
| 195 |
+
exp = exp.apply(SparseArray, fill_value=False)
|
| 196 |
+
tm.assert_frame_equal(res, exp)
|
| 197 |
+
|
| 198 |
+
def test_dataframe_dummies_all_obj(self, df, sparse):
|
| 199 |
+
df = df[["A", "B"]]
|
| 200 |
+
result = get_dummies(df, sparse=sparse)
|
| 201 |
+
expected = DataFrame(
|
| 202 |
+
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
|
| 203 |
+
dtype=bool,
|
| 204 |
+
)
|
| 205 |
+
if sparse:
|
| 206 |
+
expected = DataFrame(
|
| 207 |
+
{
|
| 208 |
+
"A_a": SparseArray([1, 0, 1], dtype="bool"),
|
| 209 |
+
"A_b": SparseArray([0, 1, 0], dtype="bool"),
|
| 210 |
+
"B_b": SparseArray([1, 1, 0], dtype="bool"),
|
| 211 |
+
"B_c": SparseArray([0, 0, 1], dtype="bool"),
|
| 212 |
+
}
|
| 213 |
+
)
|
| 214 |
+
|
| 215 |
+
tm.assert_frame_equal(result, expected)
|
| 216 |
+
|
| 217 |
+
def test_dataframe_dummies_string_dtype(self, df, using_infer_string):
|
| 218 |
+
# GH44965
|
| 219 |
+
df = df[["A", "B"]]
|
| 220 |
+
df = df.astype({"A": "object", "B": "string"})
|
| 221 |
+
result = get_dummies(df)
|
| 222 |
+
expected = DataFrame(
|
| 223 |
+
{
|
| 224 |
+
"A_a": [1, 0, 1],
|
| 225 |
+
"A_b": [0, 1, 0],
|
| 226 |
+
"B_b": [1, 1, 0],
|
| 227 |
+
"B_c": [0, 0, 1],
|
| 228 |
+
},
|
| 229 |
+
dtype=bool,
|
| 230 |
+
)
|
| 231 |
+
if not using_infer_string:
|
| 232 |
+
# infer_string returns numpy bools
|
| 233 |
+
expected[["B_b", "B_c"]] = expected[["B_b", "B_c"]].astype("boolean")
|
| 234 |
+
tm.assert_frame_equal(result, expected)
|
| 235 |
+
|
| 236 |
+
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
|
| 237 |
+
result = get_dummies(df, sparse=sparse, dtype=dtype)
|
| 238 |
+
if sparse:
|
| 239 |
+
arr = SparseArray
|
| 240 |
+
if dtype.kind == "b":
|
| 241 |
+
typ = SparseDtype(dtype, False)
|
| 242 |
+
else:
|
| 243 |
+
typ = SparseDtype(dtype, 0)
|
| 244 |
+
else:
|
| 245 |
+
arr = np.array
|
| 246 |
+
typ = dtype
|
| 247 |
+
expected = DataFrame(
|
| 248 |
+
{
|
| 249 |
+
"C": [1, 2, 3],
|
| 250 |
+
"A_a": arr([1, 0, 1], dtype=typ),
|
| 251 |
+
"A_b": arr([0, 1, 0], dtype=typ),
|
| 252 |
+
"B_b": arr([1, 1, 0], dtype=typ),
|
| 253 |
+
"B_c": arr([0, 0, 1], dtype=typ),
|
| 254 |
+
}
|
| 255 |
+
)
|
| 256 |
+
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
|
| 257 |
+
tm.assert_frame_equal(result, expected)
|
| 258 |
+
|
| 259 |
+
def test_dataframe_dummies_prefix_list(self, df, sparse):
|
| 260 |
+
prefixes = ["from_A", "from_B"]
|
| 261 |
+
result = get_dummies(df, prefix=prefixes, sparse=sparse)
|
| 262 |
+
expected = DataFrame(
|
| 263 |
+
{
|
| 264 |
+
"C": [1, 2, 3],
|
| 265 |
+
"from_A_a": [True, False, True],
|
| 266 |
+
"from_A_b": [False, True, False],
|
| 267 |
+
"from_B_b": [True, True, False],
|
| 268 |
+
"from_B_c": [False, False, True],
|
| 269 |
+
},
|
| 270 |
+
)
|
| 271 |
+
expected[["C"]] = df[["C"]]
|
| 272 |
+
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
|
| 273 |
+
expected = expected[["C"] + cols]
|
| 274 |
+
|
| 275 |
+
typ = SparseArray if sparse else Series
|
| 276 |
+
expected[cols] = expected[cols].apply(lambda x: typ(x))
|
| 277 |
+
tm.assert_frame_equal(result, expected)
|
| 278 |
+
|
| 279 |
+
def test_dataframe_dummies_prefix_str(self, df, sparse):
|
| 280 |
+
# not that you should do this...
|
| 281 |
+
result = get_dummies(df, prefix="bad", sparse=sparse)
|
| 282 |
+
bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"]
|
| 283 |
+
expected = DataFrame(
|
| 284 |
+
[
|
| 285 |
+
[1, True, False, True, False],
|
| 286 |
+
[2, False, True, True, False],
|
| 287 |
+
[3, True, False, False, True],
|
| 288 |
+
],
|
| 289 |
+
columns=["C"] + bad_columns,
|
| 290 |
+
)
|
| 291 |
+
expected = expected.astype({"C": np.int64})
|
| 292 |
+
if sparse:
|
| 293 |
+
# work around astyping & assigning with duplicate columns
|
| 294 |
+
# https://github.com/pandas-dev/pandas/issues/14427
|
| 295 |
+
expected = pd.concat(
|
| 296 |
+
[
|
| 297 |
+
Series([1, 2, 3], name="C"),
|
| 298 |
+
Series([True, False, True], name="bad_a", dtype="Sparse[bool]"),
|
| 299 |
+
Series([False, True, False], name="bad_b", dtype="Sparse[bool]"),
|
| 300 |
+
Series([True, True, False], name="bad_b", dtype="Sparse[bool]"),
|
| 301 |
+
Series([False, False, True], name="bad_c", dtype="Sparse[bool]"),
|
| 302 |
+
],
|
| 303 |
+
axis=1,
|
| 304 |
+
)
|
| 305 |
+
|
| 306 |
+
tm.assert_frame_equal(result, expected)
|
| 307 |
+
|
| 308 |
+
def test_dataframe_dummies_subset(self, df, sparse):
|
| 309 |
+
result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse)
|
| 310 |
+
expected = DataFrame(
|
| 311 |
+
{
|
| 312 |
+
"B": ["b", "b", "c"],
|
| 313 |
+
"C": [1, 2, 3],
|
| 314 |
+
"from_A_a": [1, 0, 1],
|
| 315 |
+
"from_A_b": [0, 1, 0],
|
| 316 |
+
},
|
| 317 |
+
)
|
| 318 |
+
cols = expected.columns
|
| 319 |
+
expected[cols[1:]] = expected[cols[1:]].astype(bool)
|
| 320 |
+
expected[["C"]] = df[["C"]]
|
| 321 |
+
if sparse:
|
| 322 |
+
cols = ["from_A_a", "from_A_b"]
|
| 323 |
+
expected[cols] = expected[cols].astype(SparseDtype("bool", False))
|
| 324 |
+
tm.assert_frame_equal(result, expected)
|
| 325 |
+
|
| 326 |
+
def test_dataframe_dummies_prefix_sep(self, df, sparse):
|
| 327 |
+
result = get_dummies(df, prefix_sep="..", sparse=sparse)
|
| 328 |
+
expected = DataFrame(
|
| 329 |
+
{
|
| 330 |
+
"C": [1, 2, 3],
|
| 331 |
+
"A..a": [True, False, True],
|
| 332 |
+
"A..b": [False, True, False],
|
| 333 |
+
"B..b": [True, True, False],
|
| 334 |
+
"B..c": [False, False, True],
|
| 335 |
+
},
|
| 336 |
+
)
|
| 337 |
+
expected[["C"]] = df[["C"]]
|
| 338 |
+
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
|
| 339 |
+
if sparse:
|
| 340 |
+
cols = ["A..a", "A..b", "B..b", "B..c"]
|
| 341 |
+
expected[cols] = expected[cols].astype(SparseDtype("bool", False))
|
| 342 |
+
|
| 343 |
+
tm.assert_frame_equal(result, expected)
|
| 344 |
+
|
| 345 |
+
result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse)
|
| 346 |
+
expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"})
|
| 347 |
+
tm.assert_frame_equal(result, expected)
|
| 348 |
+
|
| 349 |
+
result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse)
|
| 350 |
+
tm.assert_frame_equal(result, expected)
|
| 351 |
+
|
| 352 |
+
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
|
| 353 |
+
msg = re.escape(
|
| 354 |
+
"Length of 'prefix' (1) did not match the length of the columns being "
|
| 355 |
+
"encoded (2)"
|
| 356 |
+
)
|
| 357 |
+
with pytest.raises(ValueError, match=msg):
|
| 358 |
+
get_dummies(df, prefix=["too few"], sparse=sparse)
|
| 359 |
+
|
| 360 |
+
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
|
| 361 |
+
msg = re.escape(
|
| 362 |
+
"Length of 'prefix_sep' (1) did not match the length of the columns being "
|
| 363 |
+
"encoded (2)"
|
| 364 |
+
)
|
| 365 |
+
with pytest.raises(ValueError, match=msg):
|
| 366 |
+
get_dummies(df, prefix_sep=["bad"], sparse=sparse)
|
| 367 |
+
|
| 368 |
+
def test_dataframe_dummies_prefix_dict(self, sparse):
|
| 369 |
+
prefixes = {"A": "from_A", "B": "from_B"}
|
| 370 |
+
df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]})
|
| 371 |
+
result = get_dummies(df, prefix=prefixes, sparse=sparse)
|
| 372 |
+
|
| 373 |
+
expected = DataFrame(
|
| 374 |
+
{
|
| 375 |
+
"C": [1, 2, 3],
|
| 376 |
+
"from_A_a": [1, 0, 1],
|
| 377 |
+
"from_A_b": [0, 1, 0],
|
| 378 |
+
"from_B_b": [1, 1, 0],
|
| 379 |
+
"from_B_c": [0, 0, 1],
|
| 380 |
+
}
|
| 381 |
+
)
|
| 382 |
+
|
| 383 |
+
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
|
| 384 |
+
expected[columns] = expected[columns].astype(bool)
|
| 385 |
+
if sparse:
|
| 386 |
+
expected[columns] = expected[columns].astype(SparseDtype("bool", False))
|
| 387 |
+
|
| 388 |
+
tm.assert_frame_equal(result, expected)
|
| 389 |
+
|
| 390 |
+
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
|
| 391 |
+
df.loc[3, :] = [np.nan, np.nan, np.nan]
|
| 392 |
+
result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index(
|
| 393 |
+
axis=1
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
if sparse:
|
| 397 |
+
arr = SparseArray
|
| 398 |
+
if dtype.kind == "b":
|
| 399 |
+
typ = SparseDtype(dtype, False)
|
| 400 |
+
else:
|
| 401 |
+
typ = SparseDtype(dtype, 0)
|
| 402 |
+
else:
|
| 403 |
+
arr = np.array
|
| 404 |
+
typ = dtype
|
| 405 |
+
|
| 406 |
+
expected = DataFrame(
|
| 407 |
+
{
|
| 408 |
+
"C": [1, 2, 3, np.nan],
|
| 409 |
+
"A_a": arr([1, 0, 1, 0], dtype=typ),
|
| 410 |
+
"A_b": arr([0, 1, 0, 0], dtype=typ),
|
| 411 |
+
"A_nan": arr([0, 0, 0, 1], dtype=typ),
|
| 412 |
+
"B_b": arr([1, 1, 0, 0], dtype=typ),
|
| 413 |
+
"B_c": arr([0, 0, 1, 0], dtype=typ),
|
| 414 |
+
"B_nan": arr([0, 0, 0, 1], dtype=typ),
|
| 415 |
+
}
|
| 416 |
+
).sort_index(axis=1)
|
| 417 |
+
|
| 418 |
+
tm.assert_frame_equal(result, expected)
|
| 419 |
+
|
| 420 |
+
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
|
| 421 |
+
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
|
| 422 |
+
tm.assert_frame_equal(result, expected)
|
| 423 |
+
|
| 424 |
+
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
|
| 425 |
+
df["cat"] = Categorical(["x", "y", "y"])
|
| 426 |
+
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
|
| 427 |
+
if sparse:
|
| 428 |
+
arr = SparseArray
|
| 429 |
+
if dtype.kind == "b":
|
| 430 |
+
typ = SparseDtype(dtype, False)
|
| 431 |
+
else:
|
| 432 |
+
typ = SparseDtype(dtype, 0)
|
| 433 |
+
else:
|
| 434 |
+
arr = np.array
|
| 435 |
+
typ = dtype
|
| 436 |
+
|
| 437 |
+
expected = DataFrame(
|
| 438 |
+
{
|
| 439 |
+
"C": [1, 2, 3],
|
| 440 |
+
"A_a": arr([1, 0, 1], dtype=typ),
|
| 441 |
+
"A_b": arr([0, 1, 0], dtype=typ),
|
| 442 |
+
"B_b": arr([1, 1, 0], dtype=typ),
|
| 443 |
+
"B_c": arr([0, 0, 1], dtype=typ),
|
| 444 |
+
"cat_x": arr([1, 0, 0], dtype=typ),
|
| 445 |
+
"cat_y": arr([0, 1, 1], dtype=typ),
|
| 446 |
+
}
|
| 447 |
+
).sort_index(axis=1)
|
| 448 |
+
|
| 449 |
+
tm.assert_frame_equal(result, expected)
|
| 450 |
+
|
| 451 |
+
@pytest.mark.parametrize(
|
| 452 |
+
"get_dummies_kwargs,expected",
|
| 453 |
+
[
|
| 454 |
+
(
|
| 455 |
+
{"data": DataFrame({"ä": ["a"]})},
|
| 456 |
+
DataFrame({"ä_a": [True]}),
|
| 457 |
+
),
|
| 458 |
+
(
|
| 459 |
+
{"data": DataFrame({"x": ["ä"]})},
|
| 460 |
+
DataFrame({"x_ä": [True]}),
|
| 461 |
+
),
|
| 462 |
+
(
|
| 463 |
+
{"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
|
| 464 |
+
DataFrame({"ä_a": [True]}),
|
| 465 |
+
),
|
| 466 |
+
(
|
| 467 |
+
{"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
|
| 468 |
+
DataFrame({"xäa": [True]}),
|
| 469 |
+
),
|
| 470 |
+
],
|
| 471 |
+
)
|
| 472 |
+
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
|
| 473 |
+
# GH22084 get_dummies incorrectly encodes unicode characters
|
| 474 |
+
# in dataframe column names
|
| 475 |
+
result = get_dummies(**get_dummies_kwargs)
|
| 476 |
+
tm.assert_frame_equal(result, expected)
|
| 477 |
+
|
| 478 |
+
def test_get_dummies_basic_drop_first(self, sparse):
|
| 479 |
+
# GH12402 Add a new parameter `drop_first` to avoid collinearity
|
| 480 |
+
# Basic case
|
| 481 |
+
s_list = list("abc")
|
| 482 |
+
s_series = Series(s_list)
|
| 483 |
+
s_series_index = Series(s_list, list("ABC"))
|
| 484 |
+
|
| 485 |
+
expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=bool)
|
| 486 |
+
|
| 487 |
+
result = get_dummies(s_list, drop_first=True, sparse=sparse)
|
| 488 |
+
if sparse:
|
| 489 |
+
expected = expected.apply(SparseArray, fill_value=False)
|
| 490 |
+
tm.assert_frame_equal(result, expected)
|
| 491 |
+
|
| 492 |
+
result = get_dummies(s_series, drop_first=True, sparse=sparse)
|
| 493 |
+
tm.assert_frame_equal(result, expected)
|
| 494 |
+
|
| 495 |
+
expected.index = list("ABC")
|
| 496 |
+
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
|
| 497 |
+
tm.assert_frame_equal(result, expected)
|
| 498 |
+
|
| 499 |
+
def test_get_dummies_basic_drop_first_one_level(self, sparse):
|
| 500 |
+
# Test the case that categorical variable only has one level.
|
| 501 |
+
s_list = list("aaa")
|
| 502 |
+
s_series = Series(s_list)
|
| 503 |
+
s_series_index = Series(s_list, list("ABC"))
|
| 504 |
+
|
| 505 |
+
expected = DataFrame(index=RangeIndex(3))
|
| 506 |
+
|
| 507 |
+
result = get_dummies(s_list, drop_first=True, sparse=sparse)
|
| 508 |
+
tm.assert_frame_equal(result, expected)
|
| 509 |
+
|
| 510 |
+
result = get_dummies(s_series, drop_first=True, sparse=sparse)
|
| 511 |
+
tm.assert_frame_equal(result, expected)
|
| 512 |
+
|
| 513 |
+
expected = DataFrame(index=list("ABC"))
|
| 514 |
+
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
|
| 515 |
+
tm.assert_frame_equal(result, expected)
|
| 516 |
+
|
| 517 |
+
def test_get_dummies_basic_drop_first_NA(self, sparse):
|
| 518 |
+
# Test NA handling together with drop_first
|
| 519 |
+
s_NA = ["a", "b", np.nan]
|
| 520 |
+
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
|
| 521 |
+
exp = DataFrame({"b": [0, 1, 0]}, dtype=bool)
|
| 522 |
+
if sparse:
|
| 523 |
+
exp = exp.apply(SparseArray, fill_value=False)
|
| 524 |
+
|
| 525 |
+
tm.assert_frame_equal(res, exp)
|
| 526 |
+
|
| 527 |
+
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse)
|
| 528 |
+
exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=bool).reindex(
|
| 529 |
+
["b", np.nan], axis=1
|
| 530 |
+
)
|
| 531 |
+
if sparse:
|
| 532 |
+
exp_na = exp_na.apply(SparseArray, fill_value=False)
|
| 533 |
+
tm.assert_frame_equal(res_na, exp_na)
|
| 534 |
+
|
| 535 |
+
res_just_na = get_dummies(
|
| 536 |
+
[np.nan], dummy_na=True, drop_first=True, sparse=sparse
|
| 537 |
+
)
|
| 538 |
+
exp_just_na = DataFrame(index=RangeIndex(1))
|
| 539 |
+
tm.assert_frame_equal(res_just_na, exp_just_na)
|
| 540 |
+
|
| 541 |
+
def test_dataframe_dummies_drop_first(self, df, sparse):
|
| 542 |
+
df = df[["A", "B"]]
|
| 543 |
+
result = get_dummies(df, drop_first=True, sparse=sparse)
|
| 544 |
+
expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=bool)
|
| 545 |
+
if sparse:
|
| 546 |
+
expected = expected.apply(SparseArray, fill_value=False)
|
| 547 |
+
tm.assert_frame_equal(result, expected)
|
| 548 |
+
|
| 549 |
+
def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):
|
| 550 |
+
df["cat"] = Categorical(["x", "y", "y"])
|
| 551 |
+
result = get_dummies(df, drop_first=True, sparse=sparse)
|
| 552 |
+
expected = DataFrame(
|
| 553 |
+
{"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]}
|
| 554 |
+
)
|
| 555 |
+
cols = ["A_b", "B_c", "cat_y"]
|
| 556 |
+
expected[cols] = expected[cols].astype(bool)
|
| 557 |
+
expected = expected[["C", "A_b", "B_c", "cat_y"]]
|
| 558 |
+
if sparse:
|
| 559 |
+
for col in cols:
|
| 560 |
+
expected[col] = SparseArray(expected[col])
|
| 561 |
+
tm.assert_frame_equal(result, expected)
|
| 562 |
+
|
| 563 |
+
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
|
| 564 |
+
df.loc[3, :] = [np.nan, np.nan, np.nan]
|
| 565 |
+
result = get_dummies(
|
| 566 |
+
df, dummy_na=True, drop_first=True, sparse=sparse
|
| 567 |
+
).sort_index(axis=1)
|
| 568 |
+
expected = DataFrame(
|
| 569 |
+
{
|
| 570 |
+
"C": [1, 2, 3, np.nan],
|
| 571 |
+
"A_b": [0, 1, 0, 0],
|
| 572 |
+
"A_nan": [0, 0, 0, 1],
|
| 573 |
+
"B_c": [0, 0, 1, 0],
|
| 574 |
+
"B_nan": [0, 0, 0, 1],
|
| 575 |
+
}
|
| 576 |
+
)
|
| 577 |
+
cols = ["A_b", "A_nan", "B_c", "B_nan"]
|
| 578 |
+
expected[cols] = expected[cols].astype(bool)
|
| 579 |
+
expected = expected.sort_index(axis=1)
|
| 580 |
+
if sparse:
|
| 581 |
+
for col in cols:
|
| 582 |
+
expected[col] = SparseArray(expected[col])
|
| 583 |
+
|
| 584 |
+
tm.assert_frame_equal(result, expected)
|
| 585 |
+
|
| 586 |
+
result = get_dummies(df, dummy_na=False, drop_first=True, sparse=sparse)
|
| 587 |
+
expected = expected[["C", "A_b", "B_c"]]
|
| 588 |
+
tm.assert_frame_equal(result, expected)
|
| 589 |
+
|
| 590 |
+
def test_get_dummies_int_int(self):
|
| 591 |
+
data = Series([1, 2, 1])
|
| 592 |
+
result = get_dummies(data)
|
| 593 |
+
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=bool)
|
| 594 |
+
tm.assert_frame_equal(result, expected)
|
| 595 |
+
|
| 596 |
+
data = Series(Categorical(["a", "b", "a"]))
|
| 597 |
+
result = get_dummies(data)
|
| 598 |
+
expected = DataFrame(
|
| 599 |
+
[[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=bool
|
| 600 |
+
)
|
| 601 |
+
tm.assert_frame_equal(result, expected)
|
| 602 |
+
|
| 603 |
+
def test_get_dummies_int_df(self, dtype):
|
| 604 |
+
data = DataFrame(
|
| 605 |
+
{
|
| 606 |
+
"A": [1, 2, 1],
|
| 607 |
+
"B": Categorical(["a", "b", "a"]),
|
| 608 |
+
"C": [1, 2, 1],
|
| 609 |
+
"D": [1.0, 2.0, 1.0],
|
| 610 |
+
}
|
| 611 |
+
)
|
| 612 |
+
columns = ["C", "D", "A_1", "A_2", "B_a", "B_b"]
|
| 613 |
+
expected = DataFrame(
|
| 614 |
+
[[1, 1.0, 1, 0, 1, 0], [2, 2.0, 0, 1, 0, 1], [1, 1.0, 1, 0, 1, 0]],
|
| 615 |
+
columns=columns,
|
| 616 |
+
)
|
| 617 |
+
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
|
| 618 |
+
result = get_dummies(data, columns=["A", "B"], dtype=dtype)
|
| 619 |
+
tm.assert_frame_equal(result, expected)
|
| 620 |
+
|
| 621 |
+
@pytest.mark.parametrize("ordered", [True, False])
|
| 622 |
+
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype, ordered):
|
| 623 |
+
# GH13854
|
| 624 |
+
cat = Categorical(list("xy"), categories=list("xyz"), ordered=ordered)
|
| 625 |
+
result = get_dummies(cat, dtype=dtype)
|
| 626 |
+
|
| 627 |
+
data = np.array([[1, 0, 0], [0, 1, 0]], dtype=self.effective_dtype(dtype))
|
| 628 |
+
cols = CategoricalIndex(
|
| 629 |
+
cat.categories, categories=cat.categories, ordered=ordered
|
| 630 |
+
)
|
| 631 |
+
expected = DataFrame(data, columns=cols, dtype=self.effective_dtype(dtype))
|
| 632 |
+
|
| 633 |
+
tm.assert_frame_equal(result, expected)
|
| 634 |
+
|
| 635 |
+
@pytest.mark.parametrize("sparse", [True, False])
|
| 636 |
+
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
|
| 637 |
+
# GH18914
|
| 638 |
+
df = DataFrame.from_dict({"GDP": [1, 2], "Nation": ["AB", "CD"]})
|
| 639 |
+
df = get_dummies(df, columns=["Nation"], sparse=sparse)
|
| 640 |
+
df2 = df.reindex(columns=["GDP"])
|
| 641 |
+
|
| 642 |
+
tm.assert_frame_equal(df[["GDP"]], df2)
|
| 643 |
+
|
| 644 |
+
def test_get_dummies_duplicate_columns(self, df):
|
| 645 |
+
# GH20839
|
| 646 |
+
df.columns = ["A", "A", "A"]
|
| 647 |
+
result = get_dummies(df).sort_index(axis=1)
|
| 648 |
+
|
| 649 |
+
expected = DataFrame(
|
| 650 |
+
[
|
| 651 |
+
[1, True, False, True, False],
|
| 652 |
+
[2, False, True, True, False],
|
| 653 |
+
[3, True, False, False, True],
|
| 654 |
+
],
|
| 655 |
+
columns=["A", "A_a", "A_b", "A_b", "A_c"],
|
| 656 |
+
).sort_index(axis=1)
|
| 657 |
+
|
| 658 |
+
expected = expected.astype({"A": np.int64})
|
| 659 |
+
|
| 660 |
+
tm.assert_frame_equal(result, expected)
|
| 661 |
+
|
| 662 |
+
def test_get_dummies_all_sparse(self):
|
| 663 |
+
df = DataFrame({"A": [1, 2]})
|
| 664 |
+
result = get_dummies(df, columns=["A"], sparse=True)
|
| 665 |
+
dtype = SparseDtype("bool", False)
|
| 666 |
+
expected = DataFrame(
|
| 667 |
+
{
|
| 668 |
+
"A_1": SparseArray([1, 0], dtype=dtype),
|
| 669 |
+
"A_2": SparseArray([0, 1], dtype=dtype),
|
| 670 |
+
}
|
| 671 |
+
)
|
| 672 |
+
tm.assert_frame_equal(result, expected)
|
| 673 |
+
|
| 674 |
+
@pytest.mark.parametrize("values", ["baz"])
|
| 675 |
+
def test_get_dummies_with_string_values(self, values):
|
| 676 |
+
# issue #28383
|
| 677 |
+
df = DataFrame(
|
| 678 |
+
{
|
| 679 |
+
"bar": [1, 2, 3, 4, 5, 6],
|
| 680 |
+
"foo": ["one", "one", "one", "two", "two", "two"],
|
| 681 |
+
"baz": ["A", "B", "C", "A", "B", "C"],
|
| 682 |
+
"zoo": ["x", "y", "z", "q", "w", "t"],
|
| 683 |
+
}
|
| 684 |
+
)
|
| 685 |
+
|
| 686 |
+
msg = "Input must be a list-like for parameter `columns`"
|
| 687 |
+
|
| 688 |
+
with pytest.raises(TypeError, match=msg):
|
| 689 |
+
get_dummies(df, columns=values)
|
| 690 |
+
|
| 691 |
+
def test_get_dummies_ea_dtype_series(self, any_numeric_ea_and_arrow_dtype):
|
| 692 |
+
# GH#32430
|
| 693 |
+
ser = Series(list("abca"))
|
| 694 |
+
result = get_dummies(ser, dtype=any_numeric_ea_and_arrow_dtype)
|
| 695 |
+
expected = DataFrame(
|
| 696 |
+
{"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], "c": [0, 0, 1, 0]},
|
| 697 |
+
dtype=any_numeric_ea_and_arrow_dtype,
|
| 698 |
+
)
|
| 699 |
+
tm.assert_frame_equal(result, expected)
|
| 700 |
+
|
| 701 |
+
def test_get_dummies_ea_dtype_dataframe(self, any_numeric_ea_and_arrow_dtype):
|
| 702 |
+
# GH#32430
|
| 703 |
+
df = DataFrame({"x": list("abca")})
|
| 704 |
+
result = get_dummies(df, dtype=any_numeric_ea_and_arrow_dtype)
|
| 705 |
+
expected = DataFrame(
|
| 706 |
+
{"x_a": [1, 0, 0, 1], "x_b": [0, 1, 0, 0], "x_c": [0, 0, 1, 0]},
|
| 707 |
+
dtype=any_numeric_ea_and_arrow_dtype,
|
| 708 |
+
)
|
| 709 |
+
tm.assert_frame_equal(result, expected)
|
| 710 |
+
|
| 711 |
+
@td.skip_if_no("pyarrow")
|
| 712 |
+
def test_get_dummies_ea_dtype(self):
|
| 713 |
+
# GH#56273
|
| 714 |
+
for dtype, exp_dtype in [
|
| 715 |
+
("string[pyarrow]", "boolean"),
|
| 716 |
+
("string[pyarrow_numpy]", "bool"),
|
| 717 |
+
(CategoricalDtype(Index(["a"], dtype="string[pyarrow]")), "boolean"),
|
| 718 |
+
(CategoricalDtype(Index(["a"], dtype="string[pyarrow_numpy]")), "bool"),
|
| 719 |
+
]:
|
| 720 |
+
df = DataFrame({"name": Series(["a"], dtype=dtype), "x": 1})
|
| 721 |
+
result = get_dummies(df)
|
| 722 |
+
expected = DataFrame({"x": 1, "name_a": Series([True], dtype=exp_dtype)})
|
| 723 |
+
tm.assert_frame_equal(result, expected)
|
| 724 |
+
|
| 725 |
+
@td.skip_if_no("pyarrow")
|
| 726 |
+
def test_get_dummies_arrow_dtype(self):
|
| 727 |
+
# GH#56273
|
| 728 |
+
df = DataFrame({"name": Series(["a"], dtype=ArrowDtype(pa.string())), "x": 1})
|
| 729 |
+
result = get_dummies(df)
|
| 730 |
+
expected = DataFrame({"x": 1, "name_a": Series([True], dtype="bool[pyarrow]")})
|
| 731 |
+
tm.assert_frame_equal(result, expected)
|
| 732 |
+
|
| 733 |
+
df = DataFrame(
|
| 734 |
+
{
|
| 735 |
+
"name": Series(
|
| 736 |
+
["a"],
|
| 737 |
+
dtype=CategoricalDtype(Index(["a"], dtype=ArrowDtype(pa.string()))),
|
| 738 |
+
),
|
| 739 |
+
"x": 1,
|
| 740 |
+
}
|
| 741 |
+
)
|
| 742 |
+
result = get_dummies(df)
|
| 743 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py
ADDED
|
@@ -0,0 +1,1252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from pandas import (
|
| 8 |
+
DataFrame,
|
| 9 |
+
Index,
|
| 10 |
+
date_range,
|
| 11 |
+
lreshape,
|
| 12 |
+
melt,
|
| 13 |
+
wide_to_long,
|
| 14 |
+
)
|
| 15 |
+
import pandas._testing as tm
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@pytest.fixture
|
| 19 |
+
def df():
|
| 20 |
+
res = DataFrame(
|
| 21 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
| 22 |
+
columns=Index(list("ABCD"), dtype=object),
|
| 23 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
| 24 |
+
)
|
| 25 |
+
res["id1"] = (res["A"] > 0).astype(np.int64)
|
| 26 |
+
res["id2"] = (res["B"] > 0).astype(np.int64)
|
| 27 |
+
return res
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@pytest.fixture
|
| 31 |
+
def df1():
|
| 32 |
+
res = DataFrame(
|
| 33 |
+
[
|
| 34 |
+
[1.067683, -1.110463, 0.20867],
|
| 35 |
+
[-1.321405, 0.368915, -1.055342],
|
| 36 |
+
[-0.807333, 0.08298, -0.873361],
|
| 37 |
+
]
|
| 38 |
+
)
|
| 39 |
+
res.columns = [list("ABC"), list("abc")]
|
| 40 |
+
res.columns.names = ["CAP", "low"]
|
| 41 |
+
return res
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@pytest.fixture
|
| 45 |
+
def var_name():
|
| 46 |
+
return "var"
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@pytest.fixture
|
| 50 |
+
def value_name():
|
| 51 |
+
return "val"
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class TestMelt:
|
| 55 |
+
def test_top_level_method(self, df):
|
| 56 |
+
result = melt(df)
|
| 57 |
+
assert result.columns.tolist() == ["variable", "value"]
|
| 58 |
+
|
| 59 |
+
def test_method_signatures(self, df, df1, var_name, value_name):
|
| 60 |
+
tm.assert_frame_equal(df.melt(), melt(df))
|
| 61 |
+
|
| 62 |
+
tm.assert_frame_equal(
|
| 63 |
+
df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
|
| 64 |
+
melt(df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
tm.assert_frame_equal(
|
| 68 |
+
df.melt(var_name=var_name, value_name=value_name),
|
| 69 |
+
melt(df, var_name=var_name, value_name=value_name),
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
tm.assert_frame_equal(df1.melt(col_level=0), melt(df1, col_level=0))
|
| 73 |
+
|
| 74 |
+
def test_default_col_names(self, df):
|
| 75 |
+
result = df.melt()
|
| 76 |
+
assert result.columns.tolist() == ["variable", "value"]
|
| 77 |
+
|
| 78 |
+
result1 = df.melt(id_vars=["id1"])
|
| 79 |
+
assert result1.columns.tolist() == ["id1", "variable", "value"]
|
| 80 |
+
|
| 81 |
+
result2 = df.melt(id_vars=["id1", "id2"])
|
| 82 |
+
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
|
| 83 |
+
|
| 84 |
+
def test_value_vars(self, df):
|
| 85 |
+
result3 = df.melt(id_vars=["id1", "id2"], value_vars="A")
|
| 86 |
+
assert len(result3) == 10
|
| 87 |
+
|
| 88 |
+
result4 = df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
|
| 89 |
+
expected4 = DataFrame(
|
| 90 |
+
{
|
| 91 |
+
"id1": df["id1"].tolist() * 2,
|
| 92 |
+
"id2": df["id2"].tolist() * 2,
|
| 93 |
+
"variable": ["A"] * 10 + ["B"] * 10,
|
| 94 |
+
"value": (df["A"].tolist() + df["B"].tolist()),
|
| 95 |
+
},
|
| 96 |
+
columns=["id1", "id2", "variable", "value"],
|
| 97 |
+
)
|
| 98 |
+
tm.assert_frame_equal(result4, expected4)
|
| 99 |
+
|
| 100 |
+
@pytest.mark.parametrize("type_", (tuple, list, np.array))
|
| 101 |
+
def test_value_vars_types(self, type_, df):
|
| 102 |
+
# GH 15348
|
| 103 |
+
expected = DataFrame(
|
| 104 |
+
{
|
| 105 |
+
"id1": df["id1"].tolist() * 2,
|
| 106 |
+
"id2": df["id2"].tolist() * 2,
|
| 107 |
+
"variable": ["A"] * 10 + ["B"] * 10,
|
| 108 |
+
"value": (df["A"].tolist() + df["B"].tolist()),
|
| 109 |
+
},
|
| 110 |
+
columns=["id1", "id2", "variable", "value"],
|
| 111 |
+
)
|
| 112 |
+
result = df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
|
| 113 |
+
tm.assert_frame_equal(result, expected)
|
| 114 |
+
|
| 115 |
+
def test_vars_work_with_multiindex(self, df1):
|
| 116 |
+
expected = DataFrame(
|
| 117 |
+
{
|
| 118 |
+
("A", "a"): df1[("A", "a")],
|
| 119 |
+
"CAP": ["B"] * len(df1),
|
| 120 |
+
"low": ["b"] * len(df1),
|
| 121 |
+
"value": df1[("B", "b")],
|
| 122 |
+
},
|
| 123 |
+
columns=[("A", "a"), "CAP", "low", "value"],
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
result = df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
|
| 127 |
+
tm.assert_frame_equal(result, expected)
|
| 128 |
+
|
| 129 |
+
@pytest.mark.parametrize(
|
| 130 |
+
"id_vars, value_vars, col_level, expected",
|
| 131 |
+
[
|
| 132 |
+
(
|
| 133 |
+
["A"],
|
| 134 |
+
["B"],
|
| 135 |
+
0,
|
| 136 |
+
DataFrame(
|
| 137 |
+
{
|
| 138 |
+
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
|
| 139 |
+
"CAP": {0: "B", 1: "B", 2: "B"},
|
| 140 |
+
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
|
| 141 |
+
}
|
| 142 |
+
),
|
| 143 |
+
),
|
| 144 |
+
(
|
| 145 |
+
["a"],
|
| 146 |
+
["b"],
|
| 147 |
+
1,
|
| 148 |
+
DataFrame(
|
| 149 |
+
{
|
| 150 |
+
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
|
| 151 |
+
"low": {0: "b", 1: "b", 2: "b"},
|
| 152 |
+
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
|
| 153 |
+
}
|
| 154 |
+
),
|
| 155 |
+
),
|
| 156 |
+
],
|
| 157 |
+
)
|
| 158 |
+
def test_single_vars_work_with_multiindex(
|
| 159 |
+
self, id_vars, value_vars, col_level, expected, df1
|
| 160 |
+
):
|
| 161 |
+
result = df1.melt(id_vars, value_vars, col_level=col_level)
|
| 162 |
+
tm.assert_frame_equal(result, expected)
|
| 163 |
+
|
| 164 |
+
@pytest.mark.parametrize(
|
| 165 |
+
"id_vars, value_vars",
|
| 166 |
+
[
|
| 167 |
+
[("A", "a"), [("B", "b")]],
|
| 168 |
+
[[("A", "a")], ("B", "b")],
|
| 169 |
+
[("A", "a"), ("B", "b")],
|
| 170 |
+
],
|
| 171 |
+
)
|
| 172 |
+
def test_tuple_vars_fail_with_multiindex(self, id_vars, value_vars, df1):
|
| 173 |
+
# melt should fail with an informative error message if
|
| 174 |
+
# the columns have a MultiIndex and a tuple is passed
|
| 175 |
+
# for id_vars or value_vars.
|
| 176 |
+
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
|
| 177 |
+
with pytest.raises(ValueError, match=msg):
|
| 178 |
+
df1.melt(id_vars=id_vars, value_vars=value_vars)
|
| 179 |
+
|
| 180 |
+
def test_custom_var_name(self, df, var_name):
|
| 181 |
+
result5 = df.melt(var_name=var_name)
|
| 182 |
+
assert result5.columns.tolist() == ["var", "value"]
|
| 183 |
+
|
| 184 |
+
result6 = df.melt(id_vars=["id1"], var_name=var_name)
|
| 185 |
+
assert result6.columns.tolist() == ["id1", "var", "value"]
|
| 186 |
+
|
| 187 |
+
result7 = df.melt(id_vars=["id1", "id2"], var_name=var_name)
|
| 188 |
+
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
|
| 189 |
+
|
| 190 |
+
result8 = df.melt(id_vars=["id1", "id2"], value_vars="A", var_name=var_name)
|
| 191 |
+
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
|
| 192 |
+
|
| 193 |
+
result9 = df.melt(
|
| 194 |
+
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=var_name
|
| 195 |
+
)
|
| 196 |
+
expected9 = DataFrame(
|
| 197 |
+
{
|
| 198 |
+
"id1": df["id1"].tolist() * 2,
|
| 199 |
+
"id2": df["id2"].tolist() * 2,
|
| 200 |
+
var_name: ["A"] * 10 + ["B"] * 10,
|
| 201 |
+
"value": (df["A"].tolist() + df["B"].tolist()),
|
| 202 |
+
},
|
| 203 |
+
columns=["id1", "id2", var_name, "value"],
|
| 204 |
+
)
|
| 205 |
+
tm.assert_frame_equal(result9, expected9)
|
| 206 |
+
|
| 207 |
+
def test_custom_value_name(self, df, value_name):
|
| 208 |
+
result10 = df.melt(value_name=value_name)
|
| 209 |
+
assert result10.columns.tolist() == ["variable", "val"]
|
| 210 |
+
|
| 211 |
+
result11 = df.melt(id_vars=["id1"], value_name=value_name)
|
| 212 |
+
assert result11.columns.tolist() == ["id1", "variable", "val"]
|
| 213 |
+
|
| 214 |
+
result12 = df.melt(id_vars=["id1", "id2"], value_name=value_name)
|
| 215 |
+
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
|
| 216 |
+
|
| 217 |
+
result13 = df.melt(
|
| 218 |
+
id_vars=["id1", "id2"], value_vars="A", value_name=value_name
|
| 219 |
+
)
|
| 220 |
+
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
|
| 221 |
+
|
| 222 |
+
result14 = df.melt(
|
| 223 |
+
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=value_name
|
| 224 |
+
)
|
| 225 |
+
expected14 = DataFrame(
|
| 226 |
+
{
|
| 227 |
+
"id1": df["id1"].tolist() * 2,
|
| 228 |
+
"id2": df["id2"].tolist() * 2,
|
| 229 |
+
"variable": ["A"] * 10 + ["B"] * 10,
|
| 230 |
+
value_name: (df["A"].tolist() + df["B"].tolist()),
|
| 231 |
+
},
|
| 232 |
+
columns=["id1", "id2", "variable", value_name],
|
| 233 |
+
)
|
| 234 |
+
tm.assert_frame_equal(result14, expected14)
|
| 235 |
+
|
| 236 |
+
def test_custom_var_and_value_name(self, df, value_name, var_name):
|
| 237 |
+
result15 = df.melt(var_name=var_name, value_name=value_name)
|
| 238 |
+
assert result15.columns.tolist() == ["var", "val"]
|
| 239 |
+
|
| 240 |
+
result16 = df.melt(id_vars=["id1"], var_name=var_name, value_name=value_name)
|
| 241 |
+
assert result16.columns.tolist() == ["id1", "var", "val"]
|
| 242 |
+
|
| 243 |
+
result17 = df.melt(
|
| 244 |
+
id_vars=["id1", "id2"], var_name=var_name, value_name=value_name
|
| 245 |
+
)
|
| 246 |
+
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
|
| 247 |
+
|
| 248 |
+
result18 = df.melt(
|
| 249 |
+
id_vars=["id1", "id2"],
|
| 250 |
+
value_vars="A",
|
| 251 |
+
var_name=var_name,
|
| 252 |
+
value_name=value_name,
|
| 253 |
+
)
|
| 254 |
+
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
|
| 255 |
+
|
| 256 |
+
result19 = df.melt(
|
| 257 |
+
id_vars=["id1", "id2"],
|
| 258 |
+
value_vars=["A", "B"],
|
| 259 |
+
var_name=var_name,
|
| 260 |
+
value_name=value_name,
|
| 261 |
+
)
|
| 262 |
+
expected19 = DataFrame(
|
| 263 |
+
{
|
| 264 |
+
"id1": df["id1"].tolist() * 2,
|
| 265 |
+
"id2": df["id2"].tolist() * 2,
|
| 266 |
+
var_name: ["A"] * 10 + ["B"] * 10,
|
| 267 |
+
value_name: (df["A"].tolist() + df["B"].tolist()),
|
| 268 |
+
},
|
| 269 |
+
columns=["id1", "id2", var_name, value_name],
|
| 270 |
+
)
|
| 271 |
+
tm.assert_frame_equal(result19, expected19)
|
| 272 |
+
|
| 273 |
+
df20 = df.copy()
|
| 274 |
+
df20.columns.name = "foo"
|
| 275 |
+
result20 = df20.melt()
|
| 276 |
+
assert result20.columns.tolist() == ["foo", "value"]
|
| 277 |
+
|
| 278 |
+
@pytest.mark.parametrize("col_level", [0, "CAP"])
|
| 279 |
+
def test_col_level(self, col_level, df1):
|
| 280 |
+
res = df1.melt(col_level=col_level)
|
| 281 |
+
assert res.columns.tolist() == ["CAP", "value"]
|
| 282 |
+
|
| 283 |
+
def test_multiindex(self, df1):
|
| 284 |
+
res = df1.melt()
|
| 285 |
+
assert res.columns.tolist() == ["CAP", "low", "value"]
|
| 286 |
+
|
| 287 |
+
@pytest.mark.parametrize(
|
| 288 |
+
"col",
|
| 289 |
+
[
|
| 290 |
+
pd.Series(date_range("2010", periods=5, tz="US/Pacific")),
|
| 291 |
+
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
|
| 292 |
+
pd.Series([0, 1, 0, 0, 0]),
|
| 293 |
+
],
|
| 294 |
+
)
|
| 295 |
+
def test_pandas_dtypes(self, col):
|
| 296 |
+
# GH 15785
|
| 297 |
+
df = DataFrame(
|
| 298 |
+
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
|
| 299 |
+
)
|
| 300 |
+
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
|
| 301 |
+
result = melt(
|
| 302 |
+
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
|
| 303 |
+
)
|
| 304 |
+
expected = DataFrame(
|
| 305 |
+
{
|
| 306 |
+
0: list(range(5)) * 2,
|
| 307 |
+
1: pd.concat([col] * 2, ignore_index=True),
|
| 308 |
+
2: ["attr1"] * 5 + ["attr2"] * 5,
|
| 309 |
+
3: expected_value,
|
| 310 |
+
}
|
| 311 |
+
)
|
| 312 |
+
expected.columns = ["klass", "col", "attribute", "value"]
|
| 313 |
+
tm.assert_frame_equal(result, expected)
|
| 314 |
+
|
| 315 |
+
def test_preserve_category(self):
|
| 316 |
+
# GH 15853
|
| 317 |
+
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
|
| 318 |
+
result = melt(data, ["B"], ["A"])
|
| 319 |
+
expected = DataFrame(
|
| 320 |
+
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
tm.assert_frame_equal(result, expected)
|
| 324 |
+
|
| 325 |
+
def test_melt_missing_columns_raises(self):
|
| 326 |
+
# GH-23575
|
| 327 |
+
# This test is to ensure that pandas raises an error if melting is
|
| 328 |
+
# attempted with column names absent from the dataframe
|
| 329 |
+
|
| 330 |
+
# Generate data
|
| 331 |
+
df = DataFrame(
|
| 332 |
+
np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd")
|
| 333 |
+
)
|
| 334 |
+
|
| 335 |
+
# Try to melt with missing `value_vars` column name
|
| 336 |
+
msg = "The following id_vars or value_vars are not present in the DataFrame:"
|
| 337 |
+
with pytest.raises(KeyError, match=msg):
|
| 338 |
+
df.melt(["a", "b"], ["C", "d"])
|
| 339 |
+
|
| 340 |
+
# Try to melt with missing `id_vars` column name
|
| 341 |
+
with pytest.raises(KeyError, match=msg):
|
| 342 |
+
df.melt(["A", "b"], ["c", "d"])
|
| 343 |
+
|
| 344 |
+
# Multiple missing
|
| 345 |
+
with pytest.raises(
|
| 346 |
+
KeyError,
|
| 347 |
+
match=msg,
|
| 348 |
+
):
|
| 349 |
+
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
|
| 350 |
+
|
| 351 |
+
# Multiindex melt fails if column is missing from multilevel melt
|
| 352 |
+
multi = df.copy()
|
| 353 |
+
multi.columns = [list("ABCD"), list("abcd")]
|
| 354 |
+
with pytest.raises(KeyError, match=msg):
|
| 355 |
+
multi.melt([("E", "a")], [("B", "b")])
|
| 356 |
+
# Multiindex fails if column is missing from single level melt
|
| 357 |
+
with pytest.raises(KeyError, match=msg):
|
| 358 |
+
multi.melt(["A"], ["F"], col_level=0)
|
| 359 |
+
|
| 360 |
+
def test_melt_mixed_int_str_id_vars(self):
|
| 361 |
+
# GH 29718
|
| 362 |
+
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
|
| 363 |
+
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
|
| 364 |
+
expected = DataFrame(
|
| 365 |
+
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
|
| 366 |
+
)
|
| 367 |
+
tm.assert_frame_equal(result, expected)
|
| 368 |
+
|
| 369 |
+
def test_melt_mixed_int_str_value_vars(self):
|
| 370 |
+
# GH 29718
|
| 371 |
+
df = DataFrame({0: ["foo"], "a": ["bar"]})
|
| 372 |
+
result = melt(df, value_vars=[0, "a"])
|
| 373 |
+
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
|
| 374 |
+
tm.assert_frame_equal(result, expected)
|
| 375 |
+
|
| 376 |
+
def test_ignore_index(self):
|
| 377 |
+
# GH 17440
|
| 378 |
+
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
|
| 379 |
+
result = melt(df, ignore_index=False)
|
| 380 |
+
expected = DataFrame(
|
| 381 |
+
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
|
| 382 |
+
)
|
| 383 |
+
tm.assert_frame_equal(result, expected)
|
| 384 |
+
|
| 385 |
+
def test_ignore_multiindex(self):
|
| 386 |
+
# GH 17440
|
| 387 |
+
index = pd.MultiIndex.from_tuples(
|
| 388 |
+
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
|
| 389 |
+
)
|
| 390 |
+
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
|
| 391 |
+
result = melt(df, ignore_index=False)
|
| 392 |
+
|
| 393 |
+
expected_index = pd.MultiIndex.from_tuples(
|
| 394 |
+
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
|
| 395 |
+
)
|
| 396 |
+
expected = DataFrame(
|
| 397 |
+
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
|
| 398 |
+
index=expected_index,
|
| 399 |
+
)
|
| 400 |
+
|
| 401 |
+
tm.assert_frame_equal(result, expected)
|
| 402 |
+
|
| 403 |
+
def test_ignore_index_name_and_type(self):
|
| 404 |
+
# GH 17440
|
| 405 |
+
index = Index(["foo", "bar"], dtype="category", name="baz")
|
| 406 |
+
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
|
| 407 |
+
result = melt(df, ignore_index=False)
|
| 408 |
+
|
| 409 |
+
expected_index = Index(["foo", "bar"] * 2, dtype="category", name="baz")
|
| 410 |
+
expected = DataFrame(
|
| 411 |
+
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
|
| 412 |
+
index=expected_index,
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
tm.assert_frame_equal(result, expected)
|
| 416 |
+
|
| 417 |
+
def test_melt_with_duplicate_columns(self):
|
| 418 |
+
# GH#41951
|
| 419 |
+
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
|
| 420 |
+
result = df.melt(id_vars=["a"], value_vars=["b"])
|
| 421 |
+
expected = DataFrame(
|
| 422 |
+
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
|
| 423 |
+
)
|
| 424 |
+
tm.assert_frame_equal(result, expected)
|
| 425 |
+
|
| 426 |
+
@pytest.mark.parametrize("dtype", ["Int8", "Int64"])
|
| 427 |
+
def test_melt_ea_dtype(self, dtype):
|
| 428 |
+
# GH#41570
|
| 429 |
+
df = DataFrame(
|
| 430 |
+
{
|
| 431 |
+
"a": pd.Series([1, 2], dtype="Int8"),
|
| 432 |
+
"b": pd.Series([3, 4], dtype=dtype),
|
| 433 |
+
}
|
| 434 |
+
)
|
| 435 |
+
result = df.melt()
|
| 436 |
+
expected = DataFrame(
|
| 437 |
+
{
|
| 438 |
+
"variable": ["a", "a", "b", "b"],
|
| 439 |
+
"value": pd.Series([1, 2, 3, 4], dtype=dtype),
|
| 440 |
+
}
|
| 441 |
+
)
|
| 442 |
+
tm.assert_frame_equal(result, expected)
|
| 443 |
+
|
| 444 |
+
def test_melt_ea_columns(self):
|
| 445 |
+
# GH 54297
|
| 446 |
+
df = DataFrame(
|
| 447 |
+
{
|
| 448 |
+
"A": {0: "a", 1: "b", 2: "c"},
|
| 449 |
+
"B": {0: 1, 1: 3, 2: 5},
|
| 450 |
+
"C": {0: 2, 1: 4, 2: 6},
|
| 451 |
+
}
|
| 452 |
+
)
|
| 453 |
+
df.columns = df.columns.astype("string[python]")
|
| 454 |
+
result = df.melt(id_vars=["A"], value_vars=["B"])
|
| 455 |
+
expected = DataFrame(
|
| 456 |
+
{
|
| 457 |
+
"A": list("abc"),
|
| 458 |
+
"variable": pd.Series(["B"] * 3, dtype="string[python]"),
|
| 459 |
+
"value": [1, 3, 5],
|
| 460 |
+
}
|
| 461 |
+
)
|
| 462 |
+
tm.assert_frame_equal(result, expected)
|
| 463 |
+
|
| 464 |
+
def test_melt_preserves_datetime(self):
|
| 465 |
+
df = DataFrame(
|
| 466 |
+
data=[
|
| 467 |
+
{
|
| 468 |
+
"type": "A0",
|
| 469 |
+
"start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"),
|
| 470 |
+
"end_date": pd.Timestamp("2023/03/10", tz="Asia/Tokyo"),
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"type": "A1",
|
| 474 |
+
"start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"),
|
| 475 |
+
"end_date": pd.Timestamp("2023/03/11", tz="Asia/Tokyo"),
|
| 476 |
+
},
|
| 477 |
+
],
|
| 478 |
+
index=["aaaa", "bbbb"],
|
| 479 |
+
)
|
| 480 |
+
result = df.melt(
|
| 481 |
+
id_vars=["type"],
|
| 482 |
+
value_vars=["start_date", "end_date"],
|
| 483 |
+
var_name="start/end",
|
| 484 |
+
value_name="date",
|
| 485 |
+
)
|
| 486 |
+
expected = DataFrame(
|
| 487 |
+
{
|
| 488 |
+
"type": {0: "A0", 1: "A1", 2: "A0", 3: "A1"},
|
| 489 |
+
"start/end": {
|
| 490 |
+
0: "start_date",
|
| 491 |
+
1: "start_date",
|
| 492 |
+
2: "end_date",
|
| 493 |
+
3: "end_date",
|
| 494 |
+
},
|
| 495 |
+
"date": {
|
| 496 |
+
0: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"),
|
| 497 |
+
1: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"),
|
| 498 |
+
2: pd.Timestamp("2023-03-10 00:00:00+0900", tz="Asia/Tokyo"),
|
| 499 |
+
3: pd.Timestamp("2023-03-11 00:00:00+0900", tz="Asia/Tokyo"),
|
| 500 |
+
},
|
| 501 |
+
}
|
| 502 |
+
)
|
| 503 |
+
tm.assert_frame_equal(result, expected)
|
| 504 |
+
|
| 505 |
+
def test_melt_allows_non_scalar_id_vars(self):
|
| 506 |
+
df = DataFrame(
|
| 507 |
+
data={"a": [1, 2, 3], "b": [4, 5, 6]},
|
| 508 |
+
index=["11", "22", "33"],
|
| 509 |
+
)
|
| 510 |
+
result = df.melt(
|
| 511 |
+
id_vars="a",
|
| 512 |
+
var_name=0,
|
| 513 |
+
value_name=1,
|
| 514 |
+
)
|
| 515 |
+
expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]})
|
| 516 |
+
tm.assert_frame_equal(result, expected)
|
| 517 |
+
|
| 518 |
+
def test_melt_allows_non_string_var_name(self):
|
| 519 |
+
df = DataFrame(
|
| 520 |
+
data={"a": [1, 2, 3], "b": [4, 5, 6]},
|
| 521 |
+
index=["11", "22", "33"],
|
| 522 |
+
)
|
| 523 |
+
result = df.melt(
|
| 524 |
+
id_vars=["a"],
|
| 525 |
+
var_name=0,
|
| 526 |
+
value_name=1,
|
| 527 |
+
)
|
| 528 |
+
expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]})
|
| 529 |
+
tm.assert_frame_equal(result, expected)
|
| 530 |
+
|
| 531 |
+
def test_melt_non_scalar_var_name_raises(self):
|
| 532 |
+
df = DataFrame(
|
| 533 |
+
data={"a": [1, 2, 3], "b": [4, 5, 6]},
|
| 534 |
+
index=["11", "22", "33"],
|
| 535 |
+
)
|
| 536 |
+
with pytest.raises(ValueError, match=r".* must be a scalar."):
|
| 537 |
+
df.melt(id_vars=["a"], var_name=[1, 2])
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
class TestLreshape:
|
| 541 |
+
def test_pairs(self):
|
| 542 |
+
data = {
|
| 543 |
+
"birthdt": [
|
| 544 |
+
"08jan2009",
|
| 545 |
+
"20dec2008",
|
| 546 |
+
"30dec2008",
|
| 547 |
+
"21dec2008",
|
| 548 |
+
"11jan2009",
|
| 549 |
+
],
|
| 550 |
+
"birthwt": [1766, 3301, 1454, 3139, 4133],
|
| 551 |
+
"id": [101, 102, 103, 104, 105],
|
| 552 |
+
"sex": ["Male", "Female", "Female", "Female", "Female"],
|
| 553 |
+
"visitdt1": [
|
| 554 |
+
"11jan2009",
|
| 555 |
+
"22dec2008",
|
| 556 |
+
"04jan2009",
|
| 557 |
+
"29dec2008",
|
| 558 |
+
"20jan2009",
|
| 559 |
+
],
|
| 560 |
+
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
|
| 561 |
+
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
|
| 562 |
+
"wt1": [1823, 3338, 1549, 3298, 4306],
|
| 563 |
+
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
|
| 564 |
+
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
|
| 565 |
+
}
|
| 566 |
+
|
| 567 |
+
df = DataFrame(data)
|
| 568 |
+
|
| 569 |
+
spec = {
|
| 570 |
+
"visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
|
| 571 |
+
"wt": [f"wt{i:d}" for i in range(1, 4)],
|
| 572 |
+
}
|
| 573 |
+
result = lreshape(df, spec)
|
| 574 |
+
|
| 575 |
+
exp_data = {
|
| 576 |
+
"birthdt": [
|
| 577 |
+
"08jan2009",
|
| 578 |
+
"20dec2008",
|
| 579 |
+
"30dec2008",
|
| 580 |
+
"21dec2008",
|
| 581 |
+
"11jan2009",
|
| 582 |
+
"08jan2009",
|
| 583 |
+
"30dec2008",
|
| 584 |
+
"21dec2008",
|
| 585 |
+
"11jan2009",
|
| 586 |
+
"08jan2009",
|
| 587 |
+
"21dec2008",
|
| 588 |
+
"11jan2009",
|
| 589 |
+
],
|
| 590 |
+
"birthwt": [
|
| 591 |
+
1766,
|
| 592 |
+
3301,
|
| 593 |
+
1454,
|
| 594 |
+
3139,
|
| 595 |
+
4133,
|
| 596 |
+
1766,
|
| 597 |
+
1454,
|
| 598 |
+
3139,
|
| 599 |
+
4133,
|
| 600 |
+
1766,
|
| 601 |
+
3139,
|
| 602 |
+
4133,
|
| 603 |
+
],
|
| 604 |
+
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
|
| 605 |
+
"sex": [
|
| 606 |
+
"Male",
|
| 607 |
+
"Female",
|
| 608 |
+
"Female",
|
| 609 |
+
"Female",
|
| 610 |
+
"Female",
|
| 611 |
+
"Male",
|
| 612 |
+
"Female",
|
| 613 |
+
"Female",
|
| 614 |
+
"Female",
|
| 615 |
+
"Male",
|
| 616 |
+
"Female",
|
| 617 |
+
"Female",
|
| 618 |
+
],
|
| 619 |
+
"visitdt": [
|
| 620 |
+
"11jan2009",
|
| 621 |
+
"22dec2008",
|
| 622 |
+
"04jan2009",
|
| 623 |
+
"29dec2008",
|
| 624 |
+
"20jan2009",
|
| 625 |
+
"21jan2009",
|
| 626 |
+
"22jan2009",
|
| 627 |
+
"31dec2008",
|
| 628 |
+
"03feb2009",
|
| 629 |
+
"05feb2009",
|
| 630 |
+
"02jan2009",
|
| 631 |
+
"15feb2009",
|
| 632 |
+
],
|
| 633 |
+
"wt": [
|
| 634 |
+
1823.0,
|
| 635 |
+
3338.0,
|
| 636 |
+
1549.0,
|
| 637 |
+
3298.0,
|
| 638 |
+
4306.0,
|
| 639 |
+
2011.0,
|
| 640 |
+
1892.0,
|
| 641 |
+
3338.0,
|
| 642 |
+
4575.0,
|
| 643 |
+
2293.0,
|
| 644 |
+
3377.0,
|
| 645 |
+
4805.0,
|
| 646 |
+
],
|
| 647 |
+
}
|
| 648 |
+
exp = DataFrame(exp_data, columns=result.columns)
|
| 649 |
+
tm.assert_frame_equal(result, exp)
|
| 650 |
+
|
| 651 |
+
result = lreshape(df, spec, dropna=False)
|
| 652 |
+
exp_data = {
|
| 653 |
+
"birthdt": [
|
| 654 |
+
"08jan2009",
|
| 655 |
+
"20dec2008",
|
| 656 |
+
"30dec2008",
|
| 657 |
+
"21dec2008",
|
| 658 |
+
"11jan2009",
|
| 659 |
+
"08jan2009",
|
| 660 |
+
"20dec2008",
|
| 661 |
+
"30dec2008",
|
| 662 |
+
"21dec2008",
|
| 663 |
+
"11jan2009",
|
| 664 |
+
"08jan2009",
|
| 665 |
+
"20dec2008",
|
| 666 |
+
"30dec2008",
|
| 667 |
+
"21dec2008",
|
| 668 |
+
"11jan2009",
|
| 669 |
+
],
|
| 670 |
+
"birthwt": [
|
| 671 |
+
1766,
|
| 672 |
+
3301,
|
| 673 |
+
1454,
|
| 674 |
+
3139,
|
| 675 |
+
4133,
|
| 676 |
+
1766,
|
| 677 |
+
3301,
|
| 678 |
+
1454,
|
| 679 |
+
3139,
|
| 680 |
+
4133,
|
| 681 |
+
1766,
|
| 682 |
+
3301,
|
| 683 |
+
1454,
|
| 684 |
+
3139,
|
| 685 |
+
4133,
|
| 686 |
+
],
|
| 687 |
+
"id": [
|
| 688 |
+
101,
|
| 689 |
+
102,
|
| 690 |
+
103,
|
| 691 |
+
104,
|
| 692 |
+
105,
|
| 693 |
+
101,
|
| 694 |
+
102,
|
| 695 |
+
103,
|
| 696 |
+
104,
|
| 697 |
+
105,
|
| 698 |
+
101,
|
| 699 |
+
102,
|
| 700 |
+
103,
|
| 701 |
+
104,
|
| 702 |
+
105,
|
| 703 |
+
],
|
| 704 |
+
"sex": [
|
| 705 |
+
"Male",
|
| 706 |
+
"Female",
|
| 707 |
+
"Female",
|
| 708 |
+
"Female",
|
| 709 |
+
"Female",
|
| 710 |
+
"Male",
|
| 711 |
+
"Female",
|
| 712 |
+
"Female",
|
| 713 |
+
"Female",
|
| 714 |
+
"Female",
|
| 715 |
+
"Male",
|
| 716 |
+
"Female",
|
| 717 |
+
"Female",
|
| 718 |
+
"Female",
|
| 719 |
+
"Female",
|
| 720 |
+
],
|
| 721 |
+
"visitdt": [
|
| 722 |
+
"11jan2009",
|
| 723 |
+
"22dec2008",
|
| 724 |
+
"04jan2009",
|
| 725 |
+
"29dec2008",
|
| 726 |
+
"20jan2009",
|
| 727 |
+
"21jan2009",
|
| 728 |
+
np.nan,
|
| 729 |
+
"22jan2009",
|
| 730 |
+
"31dec2008",
|
| 731 |
+
"03feb2009",
|
| 732 |
+
"05feb2009",
|
| 733 |
+
np.nan,
|
| 734 |
+
np.nan,
|
| 735 |
+
"02jan2009",
|
| 736 |
+
"15feb2009",
|
| 737 |
+
],
|
| 738 |
+
"wt": [
|
| 739 |
+
1823.0,
|
| 740 |
+
3338.0,
|
| 741 |
+
1549.0,
|
| 742 |
+
3298.0,
|
| 743 |
+
4306.0,
|
| 744 |
+
2011.0,
|
| 745 |
+
np.nan,
|
| 746 |
+
1892.0,
|
| 747 |
+
3338.0,
|
| 748 |
+
4575.0,
|
| 749 |
+
2293.0,
|
| 750 |
+
np.nan,
|
| 751 |
+
np.nan,
|
| 752 |
+
3377.0,
|
| 753 |
+
4805.0,
|
| 754 |
+
],
|
| 755 |
+
}
|
| 756 |
+
exp = DataFrame(exp_data, columns=result.columns)
|
| 757 |
+
tm.assert_frame_equal(result, exp)
|
| 758 |
+
|
| 759 |
+
spec = {
|
| 760 |
+
"visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
|
| 761 |
+
"wt": [f"wt{i:d}" for i in range(1, 4)],
|
| 762 |
+
}
|
| 763 |
+
msg = "All column lists must be same length"
|
| 764 |
+
with pytest.raises(ValueError, match=msg):
|
| 765 |
+
lreshape(df, spec)
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
class TestWideToLong:
|
| 769 |
+
def test_simple(self):
|
| 770 |
+
x = np.random.default_rng(2).standard_normal(3)
|
| 771 |
+
df = DataFrame(
|
| 772 |
+
{
|
| 773 |
+
"A1970": {0: "a", 1: "b", 2: "c"},
|
| 774 |
+
"A1980": {0: "d", 1: "e", 2: "f"},
|
| 775 |
+
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
|
| 776 |
+
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
|
| 777 |
+
"X": dict(zip(range(3), x)),
|
| 778 |
+
}
|
| 779 |
+
)
|
| 780 |
+
df["id"] = df.index
|
| 781 |
+
exp_data = {
|
| 782 |
+
"X": x.tolist() + x.tolist(),
|
| 783 |
+
"A": ["a", "b", "c", "d", "e", "f"],
|
| 784 |
+
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
|
| 785 |
+
"year": [1970, 1970, 1970, 1980, 1980, 1980],
|
| 786 |
+
"id": [0, 1, 2, 0, 1, 2],
|
| 787 |
+
}
|
| 788 |
+
expected = DataFrame(exp_data)
|
| 789 |
+
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
|
| 790 |
+
result = wide_to_long(df, ["A", "B"], i="id", j="year")
|
| 791 |
+
tm.assert_frame_equal(result, expected)
|
| 792 |
+
|
| 793 |
+
def test_stubs(self):
|
| 794 |
+
# GH9204 wide_to_long call should not modify 'stubs' list
|
| 795 |
+
df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
|
| 796 |
+
df.columns = ["id", "inc1", "inc2", "edu1", "edu2"]
|
| 797 |
+
stubs = ["inc", "edu"]
|
| 798 |
+
|
| 799 |
+
wide_to_long(df, stubs, i="id", j="age")
|
| 800 |
+
|
| 801 |
+
assert stubs == ["inc", "edu"]
|
| 802 |
+
|
| 803 |
+
def test_separating_character(self):
|
| 804 |
+
# GH14779
|
| 805 |
+
|
| 806 |
+
x = np.random.default_rng(2).standard_normal(3)
|
| 807 |
+
df = DataFrame(
|
| 808 |
+
{
|
| 809 |
+
"A.1970": {0: "a", 1: "b", 2: "c"},
|
| 810 |
+
"A.1980": {0: "d", 1: "e", 2: "f"},
|
| 811 |
+
"B.1970": {0: 2.5, 1: 1.2, 2: 0.7},
|
| 812 |
+
"B.1980": {0: 3.2, 1: 1.3, 2: 0.1},
|
| 813 |
+
"X": dict(zip(range(3), x)),
|
| 814 |
+
}
|
| 815 |
+
)
|
| 816 |
+
df["id"] = df.index
|
| 817 |
+
exp_data = {
|
| 818 |
+
"X": x.tolist() + x.tolist(),
|
| 819 |
+
"A": ["a", "b", "c", "d", "e", "f"],
|
| 820 |
+
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
|
| 821 |
+
"year": [1970, 1970, 1970, 1980, 1980, 1980],
|
| 822 |
+
"id": [0, 1, 2, 0, 1, 2],
|
| 823 |
+
}
|
| 824 |
+
expected = DataFrame(exp_data)
|
| 825 |
+
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
|
| 826 |
+
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
|
| 827 |
+
tm.assert_frame_equal(result, expected)
|
| 828 |
+
|
| 829 |
+
def test_escapable_characters(self):
|
| 830 |
+
x = np.random.default_rng(2).standard_normal(3)
|
| 831 |
+
df = DataFrame(
|
| 832 |
+
{
|
| 833 |
+
"A(quarterly)1970": {0: "a", 1: "b", 2: "c"},
|
| 834 |
+
"A(quarterly)1980": {0: "d", 1: "e", 2: "f"},
|
| 835 |
+
"B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7},
|
| 836 |
+
"B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1},
|
| 837 |
+
"X": dict(zip(range(3), x)),
|
| 838 |
+
}
|
| 839 |
+
)
|
| 840 |
+
df["id"] = df.index
|
| 841 |
+
exp_data = {
|
| 842 |
+
"X": x.tolist() + x.tolist(),
|
| 843 |
+
"A(quarterly)": ["a", "b", "c", "d", "e", "f"],
|
| 844 |
+
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
|
| 845 |
+
"year": [1970, 1970, 1970, 1980, 1980, 1980],
|
| 846 |
+
"id": [0, 1, 2, 0, 1, 2],
|
| 847 |
+
}
|
| 848 |
+
expected = DataFrame(exp_data)
|
| 849 |
+
expected = expected.set_index(["id", "year"])[
|
| 850 |
+
["X", "A(quarterly)", "B(quarterly)"]
|
| 851 |
+
]
|
| 852 |
+
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year")
|
| 853 |
+
tm.assert_frame_equal(result, expected)
|
| 854 |
+
|
| 855 |
+
def test_unbalanced(self):
|
| 856 |
+
# test that we can have a varying amount of time variables
|
| 857 |
+
df = DataFrame(
|
| 858 |
+
{
|
| 859 |
+
"A2010": [1.0, 2.0],
|
| 860 |
+
"A2011": [3.0, 4.0],
|
| 861 |
+
"B2010": [5.0, 6.0],
|
| 862 |
+
"X": ["X1", "X2"],
|
| 863 |
+
}
|
| 864 |
+
)
|
| 865 |
+
df["id"] = df.index
|
| 866 |
+
exp_data = {
|
| 867 |
+
"X": ["X1", "X2", "X1", "X2"],
|
| 868 |
+
"A": [1.0, 2.0, 3.0, 4.0],
|
| 869 |
+
"B": [5.0, 6.0, np.nan, np.nan],
|
| 870 |
+
"id": [0, 1, 0, 1],
|
| 871 |
+
"year": [2010, 2010, 2011, 2011],
|
| 872 |
+
}
|
| 873 |
+
expected = DataFrame(exp_data)
|
| 874 |
+
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
|
| 875 |
+
result = wide_to_long(df, ["A", "B"], i="id", j="year")
|
| 876 |
+
tm.assert_frame_equal(result, expected)
|
| 877 |
+
|
| 878 |
+
def test_character_overlap(self):
|
| 879 |
+
# Test we handle overlapping characters in both id_vars and value_vars
|
| 880 |
+
df = DataFrame(
|
| 881 |
+
{
|
| 882 |
+
"A11": ["a11", "a22", "a33"],
|
| 883 |
+
"A12": ["a21", "a22", "a23"],
|
| 884 |
+
"B11": ["b11", "b12", "b13"],
|
| 885 |
+
"B12": ["b21", "b22", "b23"],
|
| 886 |
+
"BB11": [1, 2, 3],
|
| 887 |
+
"BB12": [4, 5, 6],
|
| 888 |
+
"BBBX": [91, 92, 93],
|
| 889 |
+
"BBBZ": [91, 92, 93],
|
| 890 |
+
}
|
| 891 |
+
)
|
| 892 |
+
df["id"] = df.index
|
| 893 |
+
expected = DataFrame(
|
| 894 |
+
{
|
| 895 |
+
"BBBX": [91, 92, 93, 91, 92, 93],
|
| 896 |
+
"BBBZ": [91, 92, 93, 91, 92, 93],
|
| 897 |
+
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
|
| 898 |
+
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
|
| 899 |
+
"BB": [1, 2, 3, 4, 5, 6],
|
| 900 |
+
"id": [0, 1, 2, 0, 1, 2],
|
| 901 |
+
"year": [11, 11, 11, 12, 12, 12],
|
| 902 |
+
}
|
| 903 |
+
)
|
| 904 |
+
expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]]
|
| 905 |
+
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
|
| 906 |
+
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
|
| 907 |
+
|
| 908 |
+
def test_invalid_separator(self):
|
| 909 |
+
# if an invalid separator is supplied a empty data frame is returned
|
| 910 |
+
sep = "nope!"
|
| 911 |
+
df = DataFrame(
|
| 912 |
+
{
|
| 913 |
+
"A2010": [1.0, 2.0],
|
| 914 |
+
"A2011": [3.0, 4.0],
|
| 915 |
+
"B2010": [5.0, 6.0],
|
| 916 |
+
"X": ["X1", "X2"],
|
| 917 |
+
}
|
| 918 |
+
)
|
| 919 |
+
df["id"] = df.index
|
| 920 |
+
exp_data = {
|
| 921 |
+
"X": "",
|
| 922 |
+
"A2010": [],
|
| 923 |
+
"A2011": [],
|
| 924 |
+
"B2010": [],
|
| 925 |
+
"id": [],
|
| 926 |
+
"year": [],
|
| 927 |
+
"A": [],
|
| 928 |
+
"B": [],
|
| 929 |
+
}
|
| 930 |
+
expected = DataFrame(exp_data).astype({"year": np.int64})
|
| 931 |
+
expected = expected.set_index(["id", "year"])[
|
| 932 |
+
["X", "A2010", "A2011", "B2010", "A", "B"]
|
| 933 |
+
]
|
| 934 |
+
expected.index = expected.index.set_levels([0, 1], level=0)
|
| 935 |
+
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep)
|
| 936 |
+
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
|
| 937 |
+
|
| 938 |
+
def test_num_string_disambiguation(self):
|
| 939 |
+
# Test that we can disambiguate number value_vars from
|
| 940 |
+
# string value_vars
|
| 941 |
+
df = DataFrame(
|
| 942 |
+
{
|
| 943 |
+
"A11": ["a11", "a22", "a33"],
|
| 944 |
+
"A12": ["a21", "a22", "a23"],
|
| 945 |
+
"B11": ["b11", "b12", "b13"],
|
| 946 |
+
"B12": ["b21", "b22", "b23"],
|
| 947 |
+
"BB11": [1, 2, 3],
|
| 948 |
+
"BB12": [4, 5, 6],
|
| 949 |
+
"Arating": [91, 92, 93],
|
| 950 |
+
"Arating_old": [91, 92, 93],
|
| 951 |
+
}
|
| 952 |
+
)
|
| 953 |
+
df["id"] = df.index
|
| 954 |
+
expected = DataFrame(
|
| 955 |
+
{
|
| 956 |
+
"Arating": [91, 92, 93, 91, 92, 93],
|
| 957 |
+
"Arating_old": [91, 92, 93, 91, 92, 93],
|
| 958 |
+
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
|
| 959 |
+
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
|
| 960 |
+
"BB": [1, 2, 3, 4, 5, 6],
|
| 961 |
+
"id": [0, 1, 2, 0, 1, 2],
|
| 962 |
+
"year": [11, 11, 11, 12, 12, 12],
|
| 963 |
+
}
|
| 964 |
+
)
|
| 965 |
+
expected = expected.set_index(["id", "year"])[
|
| 966 |
+
["Arating", "Arating_old", "A", "B", "BB"]
|
| 967 |
+
]
|
| 968 |
+
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
|
| 969 |
+
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
|
| 970 |
+
|
| 971 |
+
def test_invalid_suffixtype(self):
|
| 972 |
+
# If all stubs names end with a string, but a numeric suffix is
|
| 973 |
+
# assumed, an empty data frame is returned
|
| 974 |
+
df = DataFrame(
|
| 975 |
+
{
|
| 976 |
+
"Aone": [1.0, 2.0],
|
| 977 |
+
"Atwo": [3.0, 4.0],
|
| 978 |
+
"Bone": [5.0, 6.0],
|
| 979 |
+
"X": ["X1", "X2"],
|
| 980 |
+
}
|
| 981 |
+
)
|
| 982 |
+
df["id"] = df.index
|
| 983 |
+
exp_data = {
|
| 984 |
+
"X": "",
|
| 985 |
+
"Aone": [],
|
| 986 |
+
"Atwo": [],
|
| 987 |
+
"Bone": [],
|
| 988 |
+
"id": [],
|
| 989 |
+
"year": [],
|
| 990 |
+
"A": [],
|
| 991 |
+
"B": [],
|
| 992 |
+
}
|
| 993 |
+
expected = DataFrame(exp_data).astype({"year": np.int64})
|
| 994 |
+
|
| 995 |
+
expected = expected.set_index(["id", "year"])
|
| 996 |
+
expected.index = expected.index.set_levels([0, 1], level=0)
|
| 997 |
+
result = wide_to_long(df, ["A", "B"], i="id", j="year")
|
| 998 |
+
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
|
| 999 |
+
|
| 1000 |
+
def test_multiple_id_columns(self):
|
| 1001 |
+
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
|
| 1002 |
+
df = DataFrame(
|
| 1003 |
+
{
|
| 1004 |
+
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
|
| 1005 |
+
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
|
| 1006 |
+
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
|
| 1007 |
+
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
|
| 1008 |
+
}
|
| 1009 |
+
)
|
| 1010 |
+
expected = DataFrame(
|
| 1011 |
+
{
|
| 1012 |
+
"ht": [
|
| 1013 |
+
2.8,
|
| 1014 |
+
3.4,
|
| 1015 |
+
2.9,
|
| 1016 |
+
3.8,
|
| 1017 |
+
2.2,
|
| 1018 |
+
2.9,
|
| 1019 |
+
2.0,
|
| 1020 |
+
3.2,
|
| 1021 |
+
1.8,
|
| 1022 |
+
2.8,
|
| 1023 |
+
1.9,
|
| 1024 |
+
2.4,
|
| 1025 |
+
2.2,
|
| 1026 |
+
3.3,
|
| 1027 |
+
2.3,
|
| 1028 |
+
3.4,
|
| 1029 |
+
2.1,
|
| 1030 |
+
2.9,
|
| 1031 |
+
],
|
| 1032 |
+
"famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
|
| 1033 |
+
"birth": [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
|
| 1034 |
+
"age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
|
| 1035 |
+
}
|
| 1036 |
+
)
|
| 1037 |
+
expected = expected.set_index(["famid", "birth", "age"])[["ht"]]
|
| 1038 |
+
result = wide_to_long(df, "ht", i=["famid", "birth"], j="age")
|
| 1039 |
+
tm.assert_frame_equal(result, expected)
|
| 1040 |
+
|
| 1041 |
+
def test_non_unique_idvars(self):
|
| 1042 |
+
# GH16382
|
| 1043 |
+
# Raise an error message if non unique id vars (i) are passed
|
| 1044 |
+
df = DataFrame(
|
| 1045 |
+
{"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]}
|
| 1046 |
+
)
|
| 1047 |
+
msg = "the id variables need to uniquely identify each row"
|
| 1048 |
+
with pytest.raises(ValueError, match=msg):
|
| 1049 |
+
wide_to_long(df, ["A_A", "B_B"], i="x", j="colname")
|
| 1050 |
+
|
| 1051 |
+
def test_cast_j_int(self):
|
| 1052 |
+
df = DataFrame(
|
| 1053 |
+
{
|
| 1054 |
+
"actor_1": ["CCH Pounder", "Johnny Depp", "Christoph Waltz"],
|
| 1055 |
+
"actor_2": ["Joel David Moore", "Orlando Bloom", "Rory Kinnear"],
|
| 1056 |
+
"actor_fb_likes_1": [1000.0, 40000.0, 11000.0],
|
| 1057 |
+
"actor_fb_likes_2": [936.0, 5000.0, 393.0],
|
| 1058 |
+
"title": ["Avatar", "Pirates of the Caribbean", "Spectre"],
|
| 1059 |
+
}
|
| 1060 |
+
)
|
| 1061 |
+
|
| 1062 |
+
expected = DataFrame(
|
| 1063 |
+
{
|
| 1064 |
+
"actor": [
|
| 1065 |
+
"CCH Pounder",
|
| 1066 |
+
"Johnny Depp",
|
| 1067 |
+
"Christoph Waltz",
|
| 1068 |
+
"Joel David Moore",
|
| 1069 |
+
"Orlando Bloom",
|
| 1070 |
+
"Rory Kinnear",
|
| 1071 |
+
],
|
| 1072 |
+
"actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0],
|
| 1073 |
+
"num": [1, 1, 1, 2, 2, 2],
|
| 1074 |
+
"title": [
|
| 1075 |
+
"Avatar",
|
| 1076 |
+
"Pirates of the Caribbean",
|
| 1077 |
+
"Spectre",
|
| 1078 |
+
"Avatar",
|
| 1079 |
+
"Pirates of the Caribbean",
|
| 1080 |
+
"Spectre",
|
| 1081 |
+
],
|
| 1082 |
+
}
|
| 1083 |
+
).set_index(["title", "num"])
|
| 1084 |
+
result = wide_to_long(
|
| 1085 |
+
df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_"
|
| 1086 |
+
)
|
| 1087 |
+
|
| 1088 |
+
tm.assert_frame_equal(result, expected)
|
| 1089 |
+
|
| 1090 |
+
def test_identical_stubnames(self):
|
| 1091 |
+
df = DataFrame(
|
| 1092 |
+
{
|
| 1093 |
+
"A2010": [1.0, 2.0],
|
| 1094 |
+
"A2011": [3.0, 4.0],
|
| 1095 |
+
"B2010": [5.0, 6.0],
|
| 1096 |
+
"A": ["X1", "X2"],
|
| 1097 |
+
}
|
| 1098 |
+
)
|
| 1099 |
+
msg = "stubname can't be identical to a column name"
|
| 1100 |
+
with pytest.raises(ValueError, match=msg):
|
| 1101 |
+
wide_to_long(df, ["A", "B"], i="A", j="colname")
|
| 1102 |
+
|
| 1103 |
+
def test_nonnumeric_suffix(self):
|
| 1104 |
+
df = DataFrame(
|
| 1105 |
+
{
|
| 1106 |
+
"treatment_placebo": [1.0, 2.0],
|
| 1107 |
+
"treatment_test": [3.0, 4.0],
|
| 1108 |
+
"result_placebo": [5.0, 6.0],
|
| 1109 |
+
"A": ["X1", "X2"],
|
| 1110 |
+
}
|
| 1111 |
+
)
|
| 1112 |
+
expected = DataFrame(
|
| 1113 |
+
{
|
| 1114 |
+
"A": ["X1", "X2", "X1", "X2"],
|
| 1115 |
+
"colname": ["placebo", "placebo", "test", "test"],
|
| 1116 |
+
"result": [5.0, 6.0, np.nan, np.nan],
|
| 1117 |
+
"treatment": [1.0, 2.0, 3.0, 4.0],
|
| 1118 |
+
}
|
| 1119 |
+
)
|
| 1120 |
+
expected = expected.set_index(["A", "colname"])
|
| 1121 |
+
result = wide_to_long(
|
| 1122 |
+
df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_"
|
| 1123 |
+
)
|
| 1124 |
+
tm.assert_frame_equal(result, expected)
|
| 1125 |
+
|
| 1126 |
+
def test_mixed_type_suffix(self):
|
| 1127 |
+
df = DataFrame(
|
| 1128 |
+
{
|
| 1129 |
+
"A": ["X1", "X2"],
|
| 1130 |
+
"result_1": [0, 9],
|
| 1131 |
+
"result_foo": [5.0, 6.0],
|
| 1132 |
+
"treatment_1": [1.0, 2.0],
|
| 1133 |
+
"treatment_foo": [3.0, 4.0],
|
| 1134 |
+
}
|
| 1135 |
+
)
|
| 1136 |
+
expected = DataFrame(
|
| 1137 |
+
{
|
| 1138 |
+
"A": ["X1", "X2", "X1", "X2"],
|
| 1139 |
+
"colname": ["1", "1", "foo", "foo"],
|
| 1140 |
+
"result": [0.0, 9.0, 5.0, 6.0],
|
| 1141 |
+
"treatment": [1.0, 2.0, 3.0, 4.0],
|
| 1142 |
+
}
|
| 1143 |
+
).set_index(["A", "colname"])
|
| 1144 |
+
result = wide_to_long(
|
| 1145 |
+
df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_"
|
| 1146 |
+
)
|
| 1147 |
+
tm.assert_frame_equal(result, expected)
|
| 1148 |
+
|
| 1149 |
+
def test_float_suffix(self):
|
| 1150 |
+
df = DataFrame(
|
| 1151 |
+
{
|
| 1152 |
+
"treatment_1.1": [1.0, 2.0],
|
| 1153 |
+
"treatment_2.1": [3.0, 4.0],
|
| 1154 |
+
"result_1.2": [5.0, 6.0],
|
| 1155 |
+
"result_1": [0, 9],
|
| 1156 |
+
"A": ["X1", "X2"],
|
| 1157 |
+
}
|
| 1158 |
+
)
|
| 1159 |
+
expected = DataFrame(
|
| 1160 |
+
{
|
| 1161 |
+
"A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"],
|
| 1162 |
+
"colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1],
|
| 1163 |
+
"result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan],
|
| 1164 |
+
"treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0],
|
| 1165 |
+
}
|
| 1166 |
+
)
|
| 1167 |
+
expected = expected.set_index(["A", "colname"])
|
| 1168 |
+
result = wide_to_long(
|
| 1169 |
+
df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_"
|
| 1170 |
+
)
|
| 1171 |
+
tm.assert_frame_equal(result, expected)
|
| 1172 |
+
|
| 1173 |
+
def test_col_substring_of_stubname(self):
|
| 1174 |
+
# GH22468
|
| 1175 |
+
# Don't raise ValueError when a column name is a substring
|
| 1176 |
+
# of a stubname that's been passed as a string
|
| 1177 |
+
wide_data = {
|
| 1178 |
+
"node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4},
|
| 1179 |
+
"A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81},
|
| 1180 |
+
"PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6},
|
| 1181 |
+
"PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67},
|
| 1182 |
+
"PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67},
|
| 1183 |
+
}
|
| 1184 |
+
wide_df = DataFrame.from_dict(wide_data)
|
| 1185 |
+
expected = wide_to_long(wide_df, stubnames=["PA"], i=["node_id", "A"], j="time")
|
| 1186 |
+
result = wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time")
|
| 1187 |
+
tm.assert_frame_equal(result, expected)
|
| 1188 |
+
|
| 1189 |
+
def test_raise_of_column_name_value(self):
|
| 1190 |
+
# GH34731, enforced in 2.0
|
| 1191 |
+
# raise a ValueError if the resultant value column name matches
|
| 1192 |
+
# a name in the dataframe already (default name is "value")
|
| 1193 |
+
df = DataFrame({"col": list("ABC"), "value": range(10, 16, 2)})
|
| 1194 |
+
|
| 1195 |
+
with pytest.raises(
|
| 1196 |
+
ValueError, match=re.escape("value_name (value) cannot match")
|
| 1197 |
+
):
|
| 1198 |
+
df.melt(id_vars="value", value_name="value")
|
| 1199 |
+
|
| 1200 |
+
@pytest.mark.parametrize("dtype", ["O", "string"])
|
| 1201 |
+
def test_missing_stubname(self, dtype):
|
| 1202 |
+
# GH46044
|
| 1203 |
+
df = DataFrame({"id": ["1", "2"], "a-1": [100, 200], "a-2": [300, 400]})
|
| 1204 |
+
df = df.astype({"id": dtype})
|
| 1205 |
+
result = wide_to_long(
|
| 1206 |
+
df,
|
| 1207 |
+
stubnames=["a", "b"],
|
| 1208 |
+
i="id",
|
| 1209 |
+
j="num",
|
| 1210 |
+
sep="-",
|
| 1211 |
+
)
|
| 1212 |
+
index = Index(
|
| 1213 |
+
[("1", 1), ("2", 1), ("1", 2), ("2", 2)],
|
| 1214 |
+
name=("id", "num"),
|
| 1215 |
+
)
|
| 1216 |
+
expected = DataFrame(
|
| 1217 |
+
{"a": [100, 200, 300, 400], "b": [np.nan] * 4},
|
| 1218 |
+
index=index,
|
| 1219 |
+
)
|
| 1220 |
+
new_level = expected.index.levels[0].astype(dtype)
|
| 1221 |
+
expected.index = expected.index.set_levels(new_level, level=0)
|
| 1222 |
+
tm.assert_frame_equal(result, expected)
|
| 1223 |
+
|
| 1224 |
+
|
| 1225 |
+
def test_wide_to_long_pyarrow_string_columns():
|
| 1226 |
+
# GH 57066
|
| 1227 |
+
pytest.importorskip("pyarrow")
|
| 1228 |
+
df = DataFrame(
|
| 1229 |
+
{
|
| 1230 |
+
"ID": {0: 1},
|
| 1231 |
+
"R_test1": {0: 1},
|
| 1232 |
+
"R_test2": {0: 1},
|
| 1233 |
+
"R_test3": {0: 2},
|
| 1234 |
+
"D": {0: 1},
|
| 1235 |
+
}
|
| 1236 |
+
)
|
| 1237 |
+
df.columns = df.columns.astype("string[pyarrow_numpy]")
|
| 1238 |
+
result = wide_to_long(
|
| 1239 |
+
df, stubnames="R", i="ID", j="UNPIVOTED", sep="_", suffix=".*"
|
| 1240 |
+
)
|
| 1241 |
+
expected = DataFrame(
|
| 1242 |
+
[[1, 1], [1, 1], [1, 2]],
|
| 1243 |
+
columns=Index(["D", "R"], dtype=object),
|
| 1244 |
+
index=pd.MultiIndex.from_arrays(
|
| 1245 |
+
[
|
| 1246 |
+
[1, 1, 1],
|
| 1247 |
+
Index(["test1", "test2", "test3"], dtype="string[pyarrow_numpy]"),
|
| 1248 |
+
],
|
| 1249 |
+
names=["ID", "UNPIVOTED"],
|
| 1250 |
+
),
|
| 1251 |
+
)
|
| 1252 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot.py
ADDED
|
@@ -0,0 +1,2714 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import (
|
| 2 |
+
date,
|
| 3 |
+
datetime,
|
| 4 |
+
timedelta,
|
| 5 |
+
)
|
| 6 |
+
from itertools import product
|
| 7 |
+
import re
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import pytest
|
| 11 |
+
|
| 12 |
+
from pandas._config import using_pyarrow_string_dtype
|
| 13 |
+
|
| 14 |
+
from pandas.errors import PerformanceWarning
|
| 15 |
+
|
| 16 |
+
import pandas as pd
|
| 17 |
+
from pandas import (
|
| 18 |
+
Categorical,
|
| 19 |
+
DataFrame,
|
| 20 |
+
Grouper,
|
| 21 |
+
Index,
|
| 22 |
+
MultiIndex,
|
| 23 |
+
Series,
|
| 24 |
+
concat,
|
| 25 |
+
date_range,
|
| 26 |
+
)
|
| 27 |
+
import pandas._testing as tm
|
| 28 |
+
from pandas.api.types import CategoricalDtype
|
| 29 |
+
from pandas.core.reshape import reshape as reshape_lib
|
| 30 |
+
from pandas.core.reshape.pivot import pivot_table
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@pytest.fixture(params=[True, False])
|
| 34 |
+
def dropna(request):
|
| 35 |
+
return request.param
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@pytest.fixture(params=[([0] * 4, [1] * 4), (range(3), range(1, 4))])
|
| 39 |
+
def interval_values(request, closed):
|
| 40 |
+
left, right = request.param
|
| 41 |
+
return Categorical(pd.IntervalIndex.from_arrays(left, right, closed))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class TestPivotTable:
|
| 45 |
+
@pytest.fixture
|
| 46 |
+
def data(self):
|
| 47 |
+
return DataFrame(
|
| 48 |
+
{
|
| 49 |
+
"A": [
|
| 50 |
+
"foo",
|
| 51 |
+
"foo",
|
| 52 |
+
"foo",
|
| 53 |
+
"foo",
|
| 54 |
+
"bar",
|
| 55 |
+
"bar",
|
| 56 |
+
"bar",
|
| 57 |
+
"bar",
|
| 58 |
+
"foo",
|
| 59 |
+
"foo",
|
| 60 |
+
"foo",
|
| 61 |
+
],
|
| 62 |
+
"B": [
|
| 63 |
+
"one",
|
| 64 |
+
"one",
|
| 65 |
+
"one",
|
| 66 |
+
"two",
|
| 67 |
+
"one",
|
| 68 |
+
"one",
|
| 69 |
+
"one",
|
| 70 |
+
"two",
|
| 71 |
+
"two",
|
| 72 |
+
"two",
|
| 73 |
+
"one",
|
| 74 |
+
],
|
| 75 |
+
"C": [
|
| 76 |
+
"dull",
|
| 77 |
+
"dull",
|
| 78 |
+
"shiny",
|
| 79 |
+
"dull",
|
| 80 |
+
"dull",
|
| 81 |
+
"shiny",
|
| 82 |
+
"shiny",
|
| 83 |
+
"dull",
|
| 84 |
+
"shiny",
|
| 85 |
+
"shiny",
|
| 86 |
+
"shiny",
|
| 87 |
+
],
|
| 88 |
+
"D": np.random.default_rng(2).standard_normal(11),
|
| 89 |
+
"E": np.random.default_rng(2).standard_normal(11),
|
| 90 |
+
"F": np.random.default_rng(2).standard_normal(11),
|
| 91 |
+
}
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
def test_pivot_table(self, observed, data):
|
| 95 |
+
index = ["A", "B"]
|
| 96 |
+
columns = "C"
|
| 97 |
+
table = pivot_table(
|
| 98 |
+
data, values="D", index=index, columns=columns, observed=observed
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
table2 = data.pivot_table(
|
| 102 |
+
values="D", index=index, columns=columns, observed=observed
|
| 103 |
+
)
|
| 104 |
+
tm.assert_frame_equal(table, table2)
|
| 105 |
+
|
| 106 |
+
# this works
|
| 107 |
+
pivot_table(data, values="D", index=index, observed=observed)
|
| 108 |
+
|
| 109 |
+
if len(index) > 1:
|
| 110 |
+
assert table.index.names == tuple(index)
|
| 111 |
+
else:
|
| 112 |
+
assert table.index.name == index[0]
|
| 113 |
+
|
| 114 |
+
if len(columns) > 1:
|
| 115 |
+
assert table.columns.names == columns
|
| 116 |
+
else:
|
| 117 |
+
assert table.columns.name == columns[0]
|
| 118 |
+
|
| 119 |
+
expected = data.groupby(index + [columns])["D"].agg("mean").unstack()
|
| 120 |
+
tm.assert_frame_equal(table, expected)
|
| 121 |
+
|
| 122 |
+
def test_pivot_table_categorical_observed_equal(self, observed):
|
| 123 |
+
# issue #24923
|
| 124 |
+
df = DataFrame(
|
| 125 |
+
{"col1": list("abcde"), "col2": list("fghij"), "col3": [1, 2, 3, 4, 5]}
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
expected = df.pivot_table(
|
| 129 |
+
index="col1", values="col3", columns="col2", aggfunc="sum", fill_value=0
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
expected.index = expected.index.astype("category")
|
| 133 |
+
expected.columns = expected.columns.astype("category")
|
| 134 |
+
|
| 135 |
+
df.col1 = df.col1.astype("category")
|
| 136 |
+
df.col2 = df.col2.astype("category")
|
| 137 |
+
|
| 138 |
+
result = df.pivot_table(
|
| 139 |
+
index="col1",
|
| 140 |
+
values="col3",
|
| 141 |
+
columns="col2",
|
| 142 |
+
aggfunc="sum",
|
| 143 |
+
fill_value=0,
|
| 144 |
+
observed=observed,
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
tm.assert_frame_equal(result, expected)
|
| 148 |
+
|
| 149 |
+
def test_pivot_table_nocols(self):
|
| 150 |
+
df = DataFrame(
|
| 151 |
+
{"rows": ["a", "b", "c"], "cols": ["x", "y", "z"], "values": [1, 2, 3]}
|
| 152 |
+
)
|
| 153 |
+
rs = df.pivot_table(columns="cols", aggfunc="sum")
|
| 154 |
+
xp = df.pivot_table(index="cols", aggfunc="sum").T
|
| 155 |
+
tm.assert_frame_equal(rs, xp)
|
| 156 |
+
|
| 157 |
+
rs = df.pivot_table(columns="cols", aggfunc={"values": "mean"})
|
| 158 |
+
xp = df.pivot_table(index="cols", aggfunc={"values": "mean"}).T
|
| 159 |
+
tm.assert_frame_equal(rs, xp)
|
| 160 |
+
|
| 161 |
+
def test_pivot_table_dropna(self):
|
| 162 |
+
df = DataFrame(
|
| 163 |
+
{
|
| 164 |
+
"amount": {0: 60000, 1: 100000, 2: 50000, 3: 30000},
|
| 165 |
+
"customer": {0: "A", 1: "A", 2: "B", 3: "C"},
|
| 166 |
+
"month": {0: 201307, 1: 201309, 2: 201308, 3: 201310},
|
| 167 |
+
"product": {0: "a", 1: "b", 2: "c", 3: "d"},
|
| 168 |
+
"quantity": {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000},
|
| 169 |
+
}
|
| 170 |
+
)
|
| 171 |
+
pv_col = df.pivot_table(
|
| 172 |
+
"quantity", "month", ["customer", "product"], dropna=False
|
| 173 |
+
)
|
| 174 |
+
pv_ind = df.pivot_table(
|
| 175 |
+
"quantity", ["customer", "product"], "month", dropna=False
|
| 176 |
+
)
|
| 177 |
+
|
| 178 |
+
m = MultiIndex.from_tuples(
|
| 179 |
+
[
|
| 180 |
+
("A", "a"),
|
| 181 |
+
("A", "b"),
|
| 182 |
+
("A", "c"),
|
| 183 |
+
("A", "d"),
|
| 184 |
+
("B", "a"),
|
| 185 |
+
("B", "b"),
|
| 186 |
+
("B", "c"),
|
| 187 |
+
("B", "d"),
|
| 188 |
+
("C", "a"),
|
| 189 |
+
("C", "b"),
|
| 190 |
+
("C", "c"),
|
| 191 |
+
("C", "d"),
|
| 192 |
+
],
|
| 193 |
+
names=["customer", "product"],
|
| 194 |
+
)
|
| 195 |
+
tm.assert_index_equal(pv_col.columns, m)
|
| 196 |
+
tm.assert_index_equal(pv_ind.index, m)
|
| 197 |
+
|
| 198 |
+
def test_pivot_table_categorical(self):
|
| 199 |
+
cat1 = Categorical(
|
| 200 |
+
["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True
|
| 201 |
+
)
|
| 202 |
+
cat2 = Categorical(
|
| 203 |
+
["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True
|
| 204 |
+
)
|
| 205 |
+
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
|
| 206 |
+
msg = "The default value of observed=False is deprecated"
|
| 207 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 208 |
+
result = pivot_table(df, values="values", index=["A", "B"], dropna=True)
|
| 209 |
+
|
| 210 |
+
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
|
| 211 |
+
expected = DataFrame({"values": [1.0, 2.0, 3.0, 4.0]}, index=exp_index)
|
| 212 |
+
tm.assert_frame_equal(result, expected)
|
| 213 |
+
|
| 214 |
+
def test_pivot_table_dropna_categoricals(self, dropna):
|
| 215 |
+
# GH 15193
|
| 216 |
+
categories = ["a", "b", "c", "d"]
|
| 217 |
+
|
| 218 |
+
df = DataFrame(
|
| 219 |
+
{
|
| 220 |
+
"A": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
|
| 221 |
+
"B": [1, 2, 3, 1, 2, 3, 1, 2, 3],
|
| 222 |
+
"C": range(9),
|
| 223 |
+
}
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
df["A"] = df["A"].astype(CategoricalDtype(categories, ordered=False))
|
| 227 |
+
msg = "The default value of observed=False is deprecated"
|
| 228 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 229 |
+
result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna)
|
| 230 |
+
expected_columns = Series(["a", "b", "c"], name="A")
|
| 231 |
+
expected_columns = expected_columns.astype(
|
| 232 |
+
CategoricalDtype(categories, ordered=False)
|
| 233 |
+
)
|
| 234 |
+
expected_index = Series([1, 2, 3], name="B")
|
| 235 |
+
expected = DataFrame(
|
| 236 |
+
[[0.0, 3.0, 6.0], [1.0, 4.0, 7.0], [2.0, 5.0, 8.0]],
|
| 237 |
+
index=expected_index,
|
| 238 |
+
columns=expected_columns,
|
| 239 |
+
)
|
| 240 |
+
if not dropna:
|
| 241 |
+
# add back the non observed to compare
|
| 242 |
+
expected = expected.reindex(columns=Categorical(categories)).astype("float")
|
| 243 |
+
|
| 244 |
+
tm.assert_frame_equal(result, expected)
|
| 245 |
+
|
| 246 |
+
def test_pivot_with_non_observable_dropna(self, dropna):
|
| 247 |
+
# gh-21133
|
| 248 |
+
df = DataFrame(
|
| 249 |
+
{
|
| 250 |
+
"A": Categorical(
|
| 251 |
+
[np.nan, "low", "high", "low", "high"],
|
| 252 |
+
categories=["low", "high"],
|
| 253 |
+
ordered=True,
|
| 254 |
+
),
|
| 255 |
+
"B": [0.0, 1.0, 2.0, 3.0, 4.0],
|
| 256 |
+
}
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
msg = "The default value of observed=False is deprecated"
|
| 260 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 261 |
+
result = df.pivot_table(index="A", values="B", dropna=dropna)
|
| 262 |
+
if dropna:
|
| 263 |
+
values = [2.0, 3.0]
|
| 264 |
+
codes = [0, 1]
|
| 265 |
+
else:
|
| 266 |
+
# GH: 10772
|
| 267 |
+
values = [2.0, 3.0, 0.0]
|
| 268 |
+
codes = [0, 1, -1]
|
| 269 |
+
expected = DataFrame(
|
| 270 |
+
{"B": values},
|
| 271 |
+
index=Index(
|
| 272 |
+
Categorical.from_codes(
|
| 273 |
+
codes, categories=["low", "high"], ordered=dropna
|
| 274 |
+
),
|
| 275 |
+
name="A",
|
| 276 |
+
),
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
tm.assert_frame_equal(result, expected)
|
| 280 |
+
|
| 281 |
+
def test_pivot_with_non_observable_dropna_multi_cat(self, dropna):
|
| 282 |
+
# gh-21378
|
| 283 |
+
df = DataFrame(
|
| 284 |
+
{
|
| 285 |
+
"A": Categorical(
|
| 286 |
+
["left", "low", "high", "low", "high"],
|
| 287 |
+
categories=["low", "high", "left"],
|
| 288 |
+
ordered=True,
|
| 289 |
+
),
|
| 290 |
+
"B": range(5),
|
| 291 |
+
}
|
| 292 |
+
)
|
| 293 |
+
|
| 294 |
+
msg = "The default value of observed=False is deprecated"
|
| 295 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 296 |
+
result = df.pivot_table(index="A", values="B", dropna=dropna)
|
| 297 |
+
expected = DataFrame(
|
| 298 |
+
{"B": [2.0, 3.0, 0.0]},
|
| 299 |
+
index=Index(
|
| 300 |
+
Categorical.from_codes(
|
| 301 |
+
[0, 1, 2], categories=["low", "high", "left"], ordered=True
|
| 302 |
+
),
|
| 303 |
+
name="A",
|
| 304 |
+
),
|
| 305 |
+
)
|
| 306 |
+
if not dropna:
|
| 307 |
+
expected["B"] = expected["B"].astype(float)
|
| 308 |
+
|
| 309 |
+
tm.assert_frame_equal(result, expected)
|
| 310 |
+
|
| 311 |
+
def test_pivot_with_interval_index(self, interval_values, dropna):
|
| 312 |
+
# GH 25814
|
| 313 |
+
df = DataFrame({"A": interval_values, "B": 1})
|
| 314 |
+
|
| 315 |
+
msg = "The default value of observed=False is deprecated"
|
| 316 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 317 |
+
result = df.pivot_table(index="A", values="B", dropna=dropna)
|
| 318 |
+
expected = DataFrame(
|
| 319 |
+
{"B": 1.0}, index=Index(interval_values.unique(), name="A")
|
| 320 |
+
)
|
| 321 |
+
if not dropna:
|
| 322 |
+
expected = expected.astype(float)
|
| 323 |
+
tm.assert_frame_equal(result, expected)
|
| 324 |
+
|
| 325 |
+
def test_pivot_with_interval_index_margins(self):
|
| 326 |
+
# GH 25815
|
| 327 |
+
ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2])
|
| 328 |
+
df = DataFrame(
|
| 329 |
+
{
|
| 330 |
+
"A": np.arange(4, 0, -1, dtype=np.intp),
|
| 331 |
+
"B": ["a", "b", "a", "b"],
|
| 332 |
+
"C": Categorical(ordered_cat, ordered=True).sort_values(
|
| 333 |
+
ascending=False
|
| 334 |
+
),
|
| 335 |
+
}
|
| 336 |
+
)
|
| 337 |
+
|
| 338 |
+
msg = "The default value of observed=False is deprecated"
|
| 339 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 340 |
+
pivot_tab = pivot_table(
|
| 341 |
+
df, index="C", columns="B", values="A", aggfunc="sum", margins=True
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
result = pivot_tab["All"]
|
| 345 |
+
expected = Series(
|
| 346 |
+
[3, 7, 10],
|
| 347 |
+
index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"),
|
| 348 |
+
name="All",
|
| 349 |
+
dtype=np.intp,
|
| 350 |
+
)
|
| 351 |
+
tm.assert_series_equal(result, expected)
|
| 352 |
+
|
| 353 |
+
def test_pass_array(self, data):
|
| 354 |
+
result = data.pivot_table("D", index=data.A, columns=data.C)
|
| 355 |
+
expected = data.pivot_table("D", index="A", columns="C")
|
| 356 |
+
tm.assert_frame_equal(result, expected)
|
| 357 |
+
|
| 358 |
+
def test_pass_function(self, data):
|
| 359 |
+
result = data.pivot_table("D", index=lambda x: x // 5, columns=data.C)
|
| 360 |
+
expected = data.pivot_table("D", index=data.index // 5, columns="C")
|
| 361 |
+
tm.assert_frame_equal(result, expected)
|
| 362 |
+
|
| 363 |
+
def test_pivot_table_multiple(self, data):
|
| 364 |
+
index = ["A", "B"]
|
| 365 |
+
columns = "C"
|
| 366 |
+
table = pivot_table(data, index=index, columns=columns)
|
| 367 |
+
expected = data.groupby(index + [columns]).agg("mean").unstack()
|
| 368 |
+
tm.assert_frame_equal(table, expected)
|
| 369 |
+
|
| 370 |
+
def test_pivot_dtypes(self):
|
| 371 |
+
# can convert dtypes
|
| 372 |
+
f = DataFrame(
|
| 373 |
+
{
|
| 374 |
+
"a": ["cat", "bat", "cat", "bat"],
|
| 375 |
+
"v": [1, 2, 3, 4],
|
| 376 |
+
"i": ["a", "b", "a", "b"],
|
| 377 |
+
}
|
| 378 |
+
)
|
| 379 |
+
assert f.dtypes["v"] == "int64"
|
| 380 |
+
|
| 381 |
+
z = pivot_table(
|
| 382 |
+
f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc="sum"
|
| 383 |
+
)
|
| 384 |
+
result = z.dtypes
|
| 385 |
+
expected = Series([np.dtype("int64")] * 2, index=Index(list("ab"), name="i"))
|
| 386 |
+
tm.assert_series_equal(result, expected)
|
| 387 |
+
|
| 388 |
+
# cannot convert dtypes
|
| 389 |
+
f = DataFrame(
|
| 390 |
+
{
|
| 391 |
+
"a": ["cat", "bat", "cat", "bat"],
|
| 392 |
+
"v": [1.5, 2.5, 3.5, 4.5],
|
| 393 |
+
"i": ["a", "b", "a", "b"],
|
| 394 |
+
}
|
| 395 |
+
)
|
| 396 |
+
assert f.dtypes["v"] == "float64"
|
| 397 |
+
|
| 398 |
+
z = pivot_table(
|
| 399 |
+
f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc="mean"
|
| 400 |
+
)
|
| 401 |
+
result = z.dtypes
|
| 402 |
+
expected = Series([np.dtype("float64")] * 2, index=Index(list("ab"), name="i"))
|
| 403 |
+
tm.assert_series_equal(result, expected)
|
| 404 |
+
|
| 405 |
+
@pytest.mark.parametrize(
|
| 406 |
+
"columns,values",
|
| 407 |
+
[
|
| 408 |
+
("bool1", ["float1", "float2"]),
|
| 409 |
+
("bool1", ["float1", "float2", "bool1"]),
|
| 410 |
+
("bool2", ["float1", "float2", "bool1"]),
|
| 411 |
+
],
|
| 412 |
+
)
|
| 413 |
+
def test_pivot_preserve_dtypes(self, columns, values):
|
| 414 |
+
# GH 7142 regression test
|
| 415 |
+
v = np.arange(5, dtype=np.float64)
|
| 416 |
+
df = DataFrame(
|
| 417 |
+
{"float1": v, "float2": v + 2.0, "bool1": v <= 2, "bool2": v <= 3}
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
df_res = df.reset_index().pivot_table(
|
| 421 |
+
index="index", columns=columns, values=values
|
| 422 |
+
)
|
| 423 |
+
|
| 424 |
+
result = dict(df_res.dtypes)
|
| 425 |
+
expected = {col: np.dtype("float64") for col in df_res}
|
| 426 |
+
assert result == expected
|
| 427 |
+
|
| 428 |
+
def test_pivot_no_values(self):
|
| 429 |
+
# GH 14380
|
| 430 |
+
idx = pd.DatetimeIndex(
|
| 431 |
+
["2011-01-01", "2011-02-01", "2011-01-02", "2011-01-01", "2011-01-02"]
|
| 432 |
+
)
|
| 433 |
+
df = DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx)
|
| 434 |
+
res = df.pivot_table(index=df.index.month, columns=df.index.day)
|
| 435 |
+
|
| 436 |
+
exp_columns = MultiIndex.from_tuples([("A", 1), ("A", 2)])
|
| 437 |
+
exp_columns = exp_columns.set_levels(
|
| 438 |
+
exp_columns.levels[1].astype(np.int32), level=1
|
| 439 |
+
)
|
| 440 |
+
exp = DataFrame(
|
| 441 |
+
[[2.5, 4.0], [2.0, np.nan]],
|
| 442 |
+
index=Index([1, 2], dtype=np.int32),
|
| 443 |
+
columns=exp_columns,
|
| 444 |
+
)
|
| 445 |
+
tm.assert_frame_equal(res, exp)
|
| 446 |
+
|
| 447 |
+
df = DataFrame(
|
| 448 |
+
{
|
| 449 |
+
"A": [1, 2, 3, 4, 5],
|
| 450 |
+
"dt": date_range("2011-01-01", freq="D", periods=5),
|
| 451 |
+
},
|
| 452 |
+
index=idx,
|
| 453 |
+
)
|
| 454 |
+
res = df.pivot_table(index=df.index.month, columns=Grouper(key="dt", freq="ME"))
|
| 455 |
+
exp_columns = MultiIndex.from_arrays(
|
| 456 |
+
[["A"], pd.DatetimeIndex(["2011-01-31"], dtype="M8[ns]")],
|
| 457 |
+
names=[None, "dt"],
|
| 458 |
+
)
|
| 459 |
+
exp = DataFrame(
|
| 460 |
+
[3.25, 2.0], index=Index([1, 2], dtype=np.int32), columns=exp_columns
|
| 461 |
+
)
|
| 462 |
+
tm.assert_frame_equal(res, exp)
|
| 463 |
+
|
| 464 |
+
res = df.pivot_table(
|
| 465 |
+
index=Grouper(freq="YE"), columns=Grouper(key="dt", freq="ME")
|
| 466 |
+
)
|
| 467 |
+
exp = DataFrame(
|
| 468 |
+
[3.0],
|
| 469 |
+
index=pd.DatetimeIndex(["2011-12-31"], freq="YE"),
|
| 470 |
+
columns=exp_columns,
|
| 471 |
+
)
|
| 472 |
+
tm.assert_frame_equal(res, exp)
|
| 473 |
+
|
| 474 |
+
def test_pivot_multi_values(self, data):
|
| 475 |
+
result = pivot_table(
|
| 476 |
+
data, values=["D", "E"], index="A", columns=["B", "C"], fill_value=0
|
| 477 |
+
)
|
| 478 |
+
expected = pivot_table(
|
| 479 |
+
data.drop(["F"], axis=1), index="A", columns=["B", "C"], fill_value=0
|
| 480 |
+
)
|
| 481 |
+
tm.assert_frame_equal(result, expected)
|
| 482 |
+
|
| 483 |
+
def test_pivot_multi_functions(self, data):
|
| 484 |
+
f = lambda func: pivot_table(
|
| 485 |
+
data, values=["D", "E"], index=["A", "B"], columns="C", aggfunc=func
|
| 486 |
+
)
|
| 487 |
+
result = f(["mean", "std"])
|
| 488 |
+
means = f("mean")
|
| 489 |
+
stds = f("std")
|
| 490 |
+
expected = concat([means, stds], keys=["mean", "std"], axis=1)
|
| 491 |
+
tm.assert_frame_equal(result, expected)
|
| 492 |
+
|
| 493 |
+
# margins not supported??
|
| 494 |
+
f = lambda func: pivot_table(
|
| 495 |
+
data,
|
| 496 |
+
values=["D", "E"],
|
| 497 |
+
index=["A", "B"],
|
| 498 |
+
columns="C",
|
| 499 |
+
aggfunc=func,
|
| 500 |
+
margins=True,
|
| 501 |
+
)
|
| 502 |
+
result = f(["mean", "std"])
|
| 503 |
+
means = f("mean")
|
| 504 |
+
stds = f("std")
|
| 505 |
+
expected = concat([means, stds], keys=["mean", "std"], axis=1)
|
| 506 |
+
tm.assert_frame_equal(result, expected)
|
| 507 |
+
|
| 508 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 509 |
+
def test_pivot_index_with_nan(self, method):
|
| 510 |
+
# GH 3588
|
| 511 |
+
nan = np.nan
|
| 512 |
+
df = DataFrame(
|
| 513 |
+
{
|
| 514 |
+
"a": ["R1", "R2", nan, "R4"],
|
| 515 |
+
"b": ["C1", "C2", "C3", "C4"],
|
| 516 |
+
"c": [10, 15, 17, 20],
|
| 517 |
+
}
|
| 518 |
+
)
|
| 519 |
+
if method:
|
| 520 |
+
result = df.pivot(index="a", columns="b", values="c")
|
| 521 |
+
else:
|
| 522 |
+
result = pd.pivot(df, index="a", columns="b", values="c")
|
| 523 |
+
expected = DataFrame(
|
| 524 |
+
[
|
| 525 |
+
[nan, nan, 17, nan],
|
| 526 |
+
[10, nan, nan, nan],
|
| 527 |
+
[nan, 15, nan, nan],
|
| 528 |
+
[nan, nan, nan, 20],
|
| 529 |
+
],
|
| 530 |
+
index=Index([nan, "R1", "R2", "R4"], name="a"),
|
| 531 |
+
columns=Index(["C1", "C2", "C3", "C4"], name="b"),
|
| 532 |
+
)
|
| 533 |
+
tm.assert_frame_equal(result, expected)
|
| 534 |
+
tm.assert_frame_equal(df.pivot(index="b", columns="a", values="c"), expected.T)
|
| 535 |
+
|
| 536 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 537 |
+
def test_pivot_index_with_nan_dates(self, method):
|
| 538 |
+
# GH9491
|
| 539 |
+
df = DataFrame(
|
| 540 |
+
{
|
| 541 |
+
"a": date_range("2014-02-01", periods=6, freq="D"),
|
| 542 |
+
"c": 100 + np.arange(6),
|
| 543 |
+
}
|
| 544 |
+
)
|
| 545 |
+
df["b"] = df["a"] - pd.Timestamp("2014-02-02")
|
| 546 |
+
df.loc[1, "a"] = df.loc[3, "a"] = np.nan
|
| 547 |
+
df.loc[1, "b"] = df.loc[4, "b"] = np.nan
|
| 548 |
+
|
| 549 |
+
if method:
|
| 550 |
+
pv = df.pivot(index="a", columns="b", values="c")
|
| 551 |
+
else:
|
| 552 |
+
pv = pd.pivot(df, index="a", columns="b", values="c")
|
| 553 |
+
assert pv.notna().values.sum() == len(df)
|
| 554 |
+
|
| 555 |
+
for _, row in df.iterrows():
|
| 556 |
+
assert pv.loc[row["a"], row["b"]] == row["c"]
|
| 557 |
+
|
| 558 |
+
if method:
|
| 559 |
+
result = df.pivot(index="b", columns="a", values="c")
|
| 560 |
+
else:
|
| 561 |
+
result = pd.pivot(df, index="b", columns="a", values="c")
|
| 562 |
+
tm.assert_frame_equal(result, pv.T)
|
| 563 |
+
|
| 564 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 565 |
+
def test_pivot_with_tz(self, method, unit):
|
| 566 |
+
# GH 5878
|
| 567 |
+
df = DataFrame(
|
| 568 |
+
{
|
| 569 |
+
"dt1": pd.DatetimeIndex(
|
| 570 |
+
[
|
| 571 |
+
datetime(2013, 1, 1, 9, 0),
|
| 572 |
+
datetime(2013, 1, 2, 9, 0),
|
| 573 |
+
datetime(2013, 1, 1, 9, 0),
|
| 574 |
+
datetime(2013, 1, 2, 9, 0),
|
| 575 |
+
],
|
| 576 |
+
dtype=f"M8[{unit}, US/Pacific]",
|
| 577 |
+
),
|
| 578 |
+
"dt2": pd.DatetimeIndex(
|
| 579 |
+
[
|
| 580 |
+
datetime(2014, 1, 1, 9, 0),
|
| 581 |
+
datetime(2014, 1, 1, 9, 0),
|
| 582 |
+
datetime(2014, 1, 2, 9, 0),
|
| 583 |
+
datetime(2014, 1, 2, 9, 0),
|
| 584 |
+
],
|
| 585 |
+
dtype=f"M8[{unit}, Asia/Tokyo]",
|
| 586 |
+
),
|
| 587 |
+
"data1": np.arange(4, dtype="int64"),
|
| 588 |
+
"data2": np.arange(4, dtype="int64"),
|
| 589 |
+
}
|
| 590 |
+
)
|
| 591 |
+
|
| 592 |
+
exp_col1 = Index(["data1", "data1", "data2", "data2"])
|
| 593 |
+
exp_col2 = pd.DatetimeIndex(
|
| 594 |
+
["2014/01/01 09:00", "2014/01/02 09:00"] * 2,
|
| 595 |
+
name="dt2",
|
| 596 |
+
dtype=f"M8[{unit}, Asia/Tokyo]",
|
| 597 |
+
)
|
| 598 |
+
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
|
| 599 |
+
exp_idx = pd.DatetimeIndex(
|
| 600 |
+
["2013/01/01 09:00", "2013/01/02 09:00"],
|
| 601 |
+
name="dt1",
|
| 602 |
+
dtype=f"M8[{unit}, US/Pacific]",
|
| 603 |
+
)
|
| 604 |
+
expected = DataFrame(
|
| 605 |
+
[[0, 2, 0, 2], [1, 3, 1, 3]],
|
| 606 |
+
index=exp_idx,
|
| 607 |
+
columns=exp_col,
|
| 608 |
+
)
|
| 609 |
+
|
| 610 |
+
if method:
|
| 611 |
+
pv = df.pivot(index="dt1", columns="dt2")
|
| 612 |
+
else:
|
| 613 |
+
pv = pd.pivot(df, index="dt1", columns="dt2")
|
| 614 |
+
tm.assert_frame_equal(pv, expected)
|
| 615 |
+
|
| 616 |
+
expected = DataFrame(
|
| 617 |
+
[[0, 2], [1, 3]],
|
| 618 |
+
index=exp_idx,
|
| 619 |
+
columns=exp_col2[:2],
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
if method:
|
| 623 |
+
pv = df.pivot(index="dt1", columns="dt2", values="data1")
|
| 624 |
+
else:
|
| 625 |
+
pv = pd.pivot(df, index="dt1", columns="dt2", values="data1")
|
| 626 |
+
tm.assert_frame_equal(pv, expected)
|
| 627 |
+
|
| 628 |
+
def test_pivot_tz_in_values(self):
|
| 629 |
+
# GH 14948
|
| 630 |
+
df = DataFrame(
|
| 631 |
+
[
|
| 632 |
+
{
|
| 633 |
+
"uid": "aa",
|
| 634 |
+
"ts": pd.Timestamp("2016-08-12 13:00:00-0700", tz="US/Pacific"),
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"uid": "aa",
|
| 638 |
+
"ts": pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"),
|
| 639 |
+
},
|
| 640 |
+
{
|
| 641 |
+
"uid": "aa",
|
| 642 |
+
"ts": pd.Timestamp("2016-08-12 14:00:00-0700", tz="US/Pacific"),
|
| 643 |
+
},
|
| 644 |
+
{
|
| 645 |
+
"uid": "aa",
|
| 646 |
+
"ts": pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"),
|
| 647 |
+
},
|
| 648 |
+
{
|
| 649 |
+
"uid": "aa",
|
| 650 |
+
"ts": pd.Timestamp("2016-08-25 13:00:00-0700", tz="US/Pacific"),
|
| 651 |
+
},
|
| 652 |
+
]
|
| 653 |
+
)
|
| 654 |
+
|
| 655 |
+
df = df.set_index("ts").reset_index()
|
| 656 |
+
mins = df.ts.map(lambda x: x.replace(hour=0, minute=0, second=0, microsecond=0))
|
| 657 |
+
|
| 658 |
+
result = pivot_table(
|
| 659 |
+
df.set_index("ts").reset_index(),
|
| 660 |
+
values="ts",
|
| 661 |
+
index=["uid"],
|
| 662 |
+
columns=[mins],
|
| 663 |
+
aggfunc="min",
|
| 664 |
+
)
|
| 665 |
+
expected = DataFrame(
|
| 666 |
+
[
|
| 667 |
+
[
|
| 668 |
+
pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"),
|
| 669 |
+
pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"),
|
| 670 |
+
]
|
| 671 |
+
],
|
| 672 |
+
index=Index(["aa"], name="uid"),
|
| 673 |
+
columns=pd.DatetimeIndex(
|
| 674 |
+
[
|
| 675 |
+
pd.Timestamp("2016-08-12 00:00:00", tz="US/Pacific"),
|
| 676 |
+
pd.Timestamp("2016-08-25 00:00:00", tz="US/Pacific"),
|
| 677 |
+
],
|
| 678 |
+
name="ts",
|
| 679 |
+
),
|
| 680 |
+
)
|
| 681 |
+
tm.assert_frame_equal(result, expected)
|
| 682 |
+
|
| 683 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 684 |
+
def test_pivot_periods(self, method):
|
| 685 |
+
df = DataFrame(
|
| 686 |
+
{
|
| 687 |
+
"p1": [
|
| 688 |
+
pd.Period("2013-01-01", "D"),
|
| 689 |
+
pd.Period("2013-01-02", "D"),
|
| 690 |
+
pd.Period("2013-01-01", "D"),
|
| 691 |
+
pd.Period("2013-01-02", "D"),
|
| 692 |
+
],
|
| 693 |
+
"p2": [
|
| 694 |
+
pd.Period("2013-01", "M"),
|
| 695 |
+
pd.Period("2013-01", "M"),
|
| 696 |
+
pd.Period("2013-02", "M"),
|
| 697 |
+
pd.Period("2013-02", "M"),
|
| 698 |
+
],
|
| 699 |
+
"data1": np.arange(4, dtype="int64"),
|
| 700 |
+
"data2": np.arange(4, dtype="int64"),
|
| 701 |
+
}
|
| 702 |
+
)
|
| 703 |
+
|
| 704 |
+
exp_col1 = Index(["data1", "data1", "data2", "data2"])
|
| 705 |
+
exp_col2 = pd.PeriodIndex(["2013-01", "2013-02"] * 2, name="p2", freq="M")
|
| 706 |
+
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
|
| 707 |
+
expected = DataFrame(
|
| 708 |
+
[[0, 2, 0, 2], [1, 3, 1, 3]],
|
| 709 |
+
index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"),
|
| 710 |
+
columns=exp_col,
|
| 711 |
+
)
|
| 712 |
+
if method:
|
| 713 |
+
pv = df.pivot(index="p1", columns="p2")
|
| 714 |
+
else:
|
| 715 |
+
pv = pd.pivot(df, index="p1", columns="p2")
|
| 716 |
+
tm.assert_frame_equal(pv, expected)
|
| 717 |
+
|
| 718 |
+
expected = DataFrame(
|
| 719 |
+
[[0, 2], [1, 3]],
|
| 720 |
+
index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"),
|
| 721 |
+
columns=pd.PeriodIndex(["2013-01", "2013-02"], name="p2", freq="M"),
|
| 722 |
+
)
|
| 723 |
+
if method:
|
| 724 |
+
pv = df.pivot(index="p1", columns="p2", values="data1")
|
| 725 |
+
else:
|
| 726 |
+
pv = pd.pivot(df, index="p1", columns="p2", values="data1")
|
| 727 |
+
tm.assert_frame_equal(pv, expected)
|
| 728 |
+
|
| 729 |
+
def test_pivot_periods_with_margins(self):
|
| 730 |
+
# GH 28323
|
| 731 |
+
df = DataFrame(
|
| 732 |
+
{
|
| 733 |
+
"a": [1, 1, 2, 2],
|
| 734 |
+
"b": [
|
| 735 |
+
pd.Period("2019Q1"),
|
| 736 |
+
pd.Period("2019Q2"),
|
| 737 |
+
pd.Period("2019Q1"),
|
| 738 |
+
pd.Period("2019Q2"),
|
| 739 |
+
],
|
| 740 |
+
"x": 1.0,
|
| 741 |
+
}
|
| 742 |
+
)
|
| 743 |
+
|
| 744 |
+
expected = DataFrame(
|
| 745 |
+
data=1.0,
|
| 746 |
+
index=Index([1, 2, "All"], name="a"),
|
| 747 |
+
columns=Index([pd.Period("2019Q1"), pd.Period("2019Q2"), "All"], name="b"),
|
| 748 |
+
)
|
| 749 |
+
|
| 750 |
+
result = df.pivot_table(index="a", columns="b", values="x", margins=True)
|
| 751 |
+
tm.assert_frame_equal(expected, result)
|
| 752 |
+
|
| 753 |
+
@pytest.mark.parametrize(
|
| 754 |
+
"values",
|
| 755 |
+
[
|
| 756 |
+
["baz", "zoo"],
|
| 757 |
+
np.array(["baz", "zoo"]),
|
| 758 |
+
Series(["baz", "zoo"]),
|
| 759 |
+
Index(["baz", "zoo"]),
|
| 760 |
+
],
|
| 761 |
+
)
|
| 762 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 763 |
+
def test_pivot_with_list_like_values(self, values, method):
|
| 764 |
+
# issue #17160
|
| 765 |
+
df = DataFrame(
|
| 766 |
+
{
|
| 767 |
+
"foo": ["one", "one", "one", "two", "two", "two"],
|
| 768 |
+
"bar": ["A", "B", "C", "A", "B", "C"],
|
| 769 |
+
"baz": [1, 2, 3, 4, 5, 6],
|
| 770 |
+
"zoo": ["x", "y", "z", "q", "w", "t"],
|
| 771 |
+
}
|
| 772 |
+
)
|
| 773 |
+
|
| 774 |
+
if method:
|
| 775 |
+
result = df.pivot(index="foo", columns="bar", values=values)
|
| 776 |
+
else:
|
| 777 |
+
result = pd.pivot(df, index="foo", columns="bar", values=values)
|
| 778 |
+
|
| 779 |
+
data = [[1, 2, 3, "x", "y", "z"], [4, 5, 6, "q", "w", "t"]]
|
| 780 |
+
index = Index(data=["one", "two"], name="foo")
|
| 781 |
+
columns = MultiIndex(
|
| 782 |
+
levels=[["baz", "zoo"], ["A", "B", "C"]],
|
| 783 |
+
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
|
| 784 |
+
names=[None, "bar"],
|
| 785 |
+
)
|
| 786 |
+
expected = DataFrame(data=data, index=index, columns=columns)
|
| 787 |
+
expected["baz"] = expected["baz"].astype(object)
|
| 788 |
+
tm.assert_frame_equal(result, expected)
|
| 789 |
+
|
| 790 |
+
@pytest.mark.parametrize(
|
| 791 |
+
"values",
|
| 792 |
+
[
|
| 793 |
+
["bar", "baz"],
|
| 794 |
+
np.array(["bar", "baz"]),
|
| 795 |
+
Series(["bar", "baz"]),
|
| 796 |
+
Index(["bar", "baz"]),
|
| 797 |
+
],
|
| 798 |
+
)
|
| 799 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 800 |
+
def test_pivot_with_list_like_values_nans(self, values, method):
|
| 801 |
+
# issue #17160
|
| 802 |
+
df = DataFrame(
|
| 803 |
+
{
|
| 804 |
+
"foo": ["one", "one", "one", "two", "two", "two"],
|
| 805 |
+
"bar": ["A", "B", "C", "A", "B", "C"],
|
| 806 |
+
"baz": [1, 2, 3, 4, 5, 6],
|
| 807 |
+
"zoo": ["x", "y", "z", "q", "w", "t"],
|
| 808 |
+
}
|
| 809 |
+
)
|
| 810 |
+
|
| 811 |
+
if method:
|
| 812 |
+
result = df.pivot(index="zoo", columns="foo", values=values)
|
| 813 |
+
else:
|
| 814 |
+
result = pd.pivot(df, index="zoo", columns="foo", values=values)
|
| 815 |
+
|
| 816 |
+
data = [
|
| 817 |
+
[np.nan, "A", np.nan, 4],
|
| 818 |
+
[np.nan, "C", np.nan, 6],
|
| 819 |
+
[np.nan, "B", np.nan, 5],
|
| 820 |
+
["A", np.nan, 1, np.nan],
|
| 821 |
+
["B", np.nan, 2, np.nan],
|
| 822 |
+
["C", np.nan, 3, np.nan],
|
| 823 |
+
]
|
| 824 |
+
index = Index(data=["q", "t", "w", "x", "y", "z"], name="zoo")
|
| 825 |
+
columns = MultiIndex(
|
| 826 |
+
levels=[["bar", "baz"], ["one", "two"]],
|
| 827 |
+
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
|
| 828 |
+
names=[None, "foo"],
|
| 829 |
+
)
|
| 830 |
+
expected = DataFrame(data=data, index=index, columns=columns)
|
| 831 |
+
expected["baz"] = expected["baz"].astype(object)
|
| 832 |
+
tm.assert_frame_equal(result, expected)
|
| 833 |
+
|
| 834 |
+
def test_pivot_columns_none_raise_error(self):
|
| 835 |
+
# GH 30924
|
| 836 |
+
df = DataFrame({"col1": ["a", "b", "c"], "col2": [1, 2, 3], "col3": [1, 2, 3]})
|
| 837 |
+
msg = r"pivot\(\) missing 1 required keyword-only argument: 'columns'"
|
| 838 |
+
with pytest.raises(TypeError, match=msg):
|
| 839 |
+
df.pivot(index="col1", values="col3") # pylint: disable=missing-kwoa
|
| 840 |
+
|
| 841 |
+
@pytest.mark.xfail(
|
| 842 |
+
reason="MultiIndexed unstack with tuple names fails with KeyError GH#19966"
|
| 843 |
+
)
|
| 844 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 845 |
+
def test_pivot_with_multiindex(self, method):
|
| 846 |
+
# issue #17160
|
| 847 |
+
index = Index(data=[0, 1, 2, 3, 4, 5])
|
| 848 |
+
data = [
|
| 849 |
+
["one", "A", 1, "x"],
|
| 850 |
+
["one", "B", 2, "y"],
|
| 851 |
+
["one", "C", 3, "z"],
|
| 852 |
+
["two", "A", 4, "q"],
|
| 853 |
+
["two", "B", 5, "w"],
|
| 854 |
+
["two", "C", 6, "t"],
|
| 855 |
+
]
|
| 856 |
+
columns = MultiIndex(
|
| 857 |
+
levels=[["bar", "baz"], ["first", "second"]],
|
| 858 |
+
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
|
| 859 |
+
)
|
| 860 |
+
df = DataFrame(data=data, index=index, columns=columns, dtype="object")
|
| 861 |
+
if method:
|
| 862 |
+
result = df.pivot(
|
| 863 |
+
index=("bar", "first"),
|
| 864 |
+
columns=("bar", "second"),
|
| 865 |
+
values=("baz", "first"),
|
| 866 |
+
)
|
| 867 |
+
else:
|
| 868 |
+
result = pd.pivot(
|
| 869 |
+
df,
|
| 870 |
+
index=("bar", "first"),
|
| 871 |
+
columns=("bar", "second"),
|
| 872 |
+
values=("baz", "first"),
|
| 873 |
+
)
|
| 874 |
+
|
| 875 |
+
data = {
|
| 876 |
+
"A": Series([1, 4], index=["one", "two"]),
|
| 877 |
+
"B": Series([2, 5], index=["one", "two"]),
|
| 878 |
+
"C": Series([3, 6], index=["one", "two"]),
|
| 879 |
+
}
|
| 880 |
+
expected = DataFrame(data)
|
| 881 |
+
tm.assert_frame_equal(result, expected)
|
| 882 |
+
|
| 883 |
+
@pytest.mark.parametrize("method", [True, False])
|
| 884 |
+
def test_pivot_with_tuple_of_values(self, method):
|
| 885 |
+
# issue #17160
|
| 886 |
+
df = DataFrame(
|
| 887 |
+
{
|
| 888 |
+
"foo": ["one", "one", "one", "two", "two", "two"],
|
| 889 |
+
"bar": ["A", "B", "C", "A", "B", "C"],
|
| 890 |
+
"baz": [1, 2, 3, 4, 5, 6],
|
| 891 |
+
"zoo": ["x", "y", "z", "q", "w", "t"],
|
| 892 |
+
}
|
| 893 |
+
)
|
| 894 |
+
with pytest.raises(KeyError, match=r"^\('bar', 'baz'\)$"):
|
| 895 |
+
# tuple is seen as a single column name
|
| 896 |
+
if method:
|
| 897 |
+
df.pivot(index="zoo", columns="foo", values=("bar", "baz"))
|
| 898 |
+
else:
|
| 899 |
+
pd.pivot(df, index="zoo", columns="foo", values=("bar", "baz"))
|
| 900 |
+
|
| 901 |
+
def _check_output(
|
| 902 |
+
self,
|
| 903 |
+
result,
|
| 904 |
+
values_col,
|
| 905 |
+
data,
|
| 906 |
+
index=["A", "B"],
|
| 907 |
+
columns=["C"],
|
| 908 |
+
margins_col="All",
|
| 909 |
+
):
|
| 910 |
+
col_margins = result.loc[result.index[:-1], margins_col]
|
| 911 |
+
expected_col_margins = data.groupby(index)[values_col].mean()
|
| 912 |
+
tm.assert_series_equal(col_margins, expected_col_margins, check_names=False)
|
| 913 |
+
assert col_margins.name == margins_col
|
| 914 |
+
|
| 915 |
+
result = result.sort_index()
|
| 916 |
+
index_margins = result.loc[(margins_col, "")].iloc[:-1]
|
| 917 |
+
|
| 918 |
+
expected_ix_margins = data.groupby(columns)[values_col].mean()
|
| 919 |
+
tm.assert_series_equal(index_margins, expected_ix_margins, check_names=False)
|
| 920 |
+
assert index_margins.name == (margins_col, "")
|
| 921 |
+
|
| 922 |
+
grand_total_margins = result.loc[(margins_col, ""), margins_col]
|
| 923 |
+
expected_total_margins = data[values_col].mean()
|
| 924 |
+
assert grand_total_margins == expected_total_margins
|
| 925 |
+
|
| 926 |
+
def test_margins(self, data):
|
| 927 |
+
# column specified
|
| 928 |
+
result = data.pivot_table(
|
| 929 |
+
values="D", index=["A", "B"], columns="C", margins=True, aggfunc="mean"
|
| 930 |
+
)
|
| 931 |
+
self._check_output(result, "D", data)
|
| 932 |
+
|
| 933 |
+
# Set a different margins_name (not 'All')
|
| 934 |
+
result = data.pivot_table(
|
| 935 |
+
values="D",
|
| 936 |
+
index=["A", "B"],
|
| 937 |
+
columns="C",
|
| 938 |
+
margins=True,
|
| 939 |
+
aggfunc="mean",
|
| 940 |
+
margins_name="Totals",
|
| 941 |
+
)
|
| 942 |
+
self._check_output(result, "D", data, margins_col="Totals")
|
| 943 |
+
|
| 944 |
+
# no column specified
|
| 945 |
+
table = data.pivot_table(
|
| 946 |
+
index=["A", "B"], columns="C", margins=True, aggfunc="mean"
|
| 947 |
+
)
|
| 948 |
+
for value_col in table.columns.levels[0]:
|
| 949 |
+
self._check_output(table[value_col], value_col, data)
|
| 950 |
+
|
| 951 |
+
def test_no_col(self, data):
|
| 952 |
+
# no col
|
| 953 |
+
|
| 954 |
+
# to help with a buglet
|
| 955 |
+
data.columns = [k * 2 for k in data.columns]
|
| 956 |
+
msg = re.escape("agg function failed [how->mean,dtype->")
|
| 957 |
+
with pytest.raises(TypeError, match=msg):
|
| 958 |
+
data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
|
| 959 |
+
table = data.drop(columns="CC").pivot_table(
|
| 960 |
+
index=["AA", "BB"], margins=True, aggfunc="mean"
|
| 961 |
+
)
|
| 962 |
+
for value_col in table.columns:
|
| 963 |
+
totals = table.loc[("All", ""), value_col]
|
| 964 |
+
assert totals == data[value_col].mean()
|
| 965 |
+
|
| 966 |
+
with pytest.raises(TypeError, match=msg):
|
| 967 |
+
data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
|
| 968 |
+
table = data.drop(columns="CC").pivot_table(
|
| 969 |
+
index=["AA", "BB"], margins=True, aggfunc="mean"
|
| 970 |
+
)
|
| 971 |
+
for item in ["DD", "EE", "FF"]:
|
| 972 |
+
totals = table.loc[("All", ""), item]
|
| 973 |
+
assert totals == data[item].mean()
|
| 974 |
+
|
| 975 |
+
@pytest.mark.parametrize(
|
| 976 |
+
"columns, aggfunc, values, expected_columns",
|
| 977 |
+
[
|
| 978 |
+
(
|
| 979 |
+
"A",
|
| 980 |
+
"mean",
|
| 981 |
+
[[5.5, 5.5, 2.2, 2.2], [8.0, 8.0, 4.4, 4.4]],
|
| 982 |
+
Index(["bar", "All", "foo", "All"], name="A"),
|
| 983 |
+
),
|
| 984 |
+
(
|
| 985 |
+
["A", "B"],
|
| 986 |
+
"sum",
|
| 987 |
+
[
|
| 988 |
+
[9, 13, 22, 5, 6, 11],
|
| 989 |
+
[14, 18, 32, 11, 11, 22],
|
| 990 |
+
],
|
| 991 |
+
MultiIndex.from_tuples(
|
| 992 |
+
[
|
| 993 |
+
("bar", "one"),
|
| 994 |
+
("bar", "two"),
|
| 995 |
+
("bar", "All"),
|
| 996 |
+
("foo", "one"),
|
| 997 |
+
("foo", "two"),
|
| 998 |
+
("foo", "All"),
|
| 999 |
+
],
|
| 1000 |
+
names=["A", "B"],
|
| 1001 |
+
),
|
| 1002 |
+
),
|
| 1003 |
+
],
|
| 1004 |
+
)
|
| 1005 |
+
def test_margin_with_only_columns_defined(
|
| 1006 |
+
self, columns, aggfunc, values, expected_columns
|
| 1007 |
+
):
|
| 1008 |
+
# GH 31016
|
| 1009 |
+
df = DataFrame(
|
| 1010 |
+
{
|
| 1011 |
+
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
|
| 1012 |
+
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
|
| 1013 |
+
"C": [
|
| 1014 |
+
"small",
|
| 1015 |
+
"large",
|
| 1016 |
+
"large",
|
| 1017 |
+
"small",
|
| 1018 |
+
"small",
|
| 1019 |
+
"large",
|
| 1020 |
+
"small",
|
| 1021 |
+
"small",
|
| 1022 |
+
"large",
|
| 1023 |
+
],
|
| 1024 |
+
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
|
| 1025 |
+
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
|
| 1026 |
+
}
|
| 1027 |
+
)
|
| 1028 |
+
if aggfunc != "sum":
|
| 1029 |
+
msg = re.escape("agg function failed [how->mean,dtype->")
|
| 1030 |
+
with pytest.raises(TypeError, match=msg):
|
| 1031 |
+
df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc)
|
| 1032 |
+
if "B" not in columns:
|
| 1033 |
+
df = df.drop(columns="B")
|
| 1034 |
+
result = df.drop(columns="C").pivot_table(
|
| 1035 |
+
columns=columns, margins=True, aggfunc=aggfunc
|
| 1036 |
+
)
|
| 1037 |
+
expected = DataFrame(values, index=Index(["D", "E"]), columns=expected_columns)
|
| 1038 |
+
|
| 1039 |
+
tm.assert_frame_equal(result, expected)
|
| 1040 |
+
|
| 1041 |
+
def test_margins_dtype(self, data):
|
| 1042 |
+
# GH 17013
|
| 1043 |
+
|
| 1044 |
+
df = data.copy()
|
| 1045 |
+
df[["D", "E", "F"]] = np.arange(len(df) * 3).reshape(len(df), 3).astype("i8")
|
| 1046 |
+
|
| 1047 |
+
mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")]
|
| 1048 |
+
mi = MultiIndex.from_tuples(mi_val, names=("A", "B"))
|
| 1049 |
+
expected = DataFrame(
|
| 1050 |
+
{"dull": [12, 21, 3, 9, 45], "shiny": [33, 0, 36, 51, 120]}, index=mi
|
| 1051 |
+
).rename_axis("C", axis=1)
|
| 1052 |
+
expected["All"] = expected["dull"] + expected["shiny"]
|
| 1053 |
+
|
| 1054 |
+
result = df.pivot_table(
|
| 1055 |
+
values="D",
|
| 1056 |
+
index=["A", "B"],
|
| 1057 |
+
columns="C",
|
| 1058 |
+
margins=True,
|
| 1059 |
+
aggfunc="sum",
|
| 1060 |
+
fill_value=0,
|
| 1061 |
+
)
|
| 1062 |
+
|
| 1063 |
+
tm.assert_frame_equal(expected, result)
|
| 1064 |
+
|
| 1065 |
+
def test_margins_dtype_len(self, data):
|
| 1066 |
+
mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")]
|
| 1067 |
+
mi = MultiIndex.from_tuples(mi_val, names=("A", "B"))
|
| 1068 |
+
expected = DataFrame(
|
| 1069 |
+
{"dull": [1, 1, 2, 1, 5], "shiny": [2, 0, 2, 2, 6]}, index=mi
|
| 1070 |
+
).rename_axis("C", axis=1)
|
| 1071 |
+
expected["All"] = expected["dull"] + expected["shiny"]
|
| 1072 |
+
|
| 1073 |
+
result = data.pivot_table(
|
| 1074 |
+
values="D",
|
| 1075 |
+
index=["A", "B"],
|
| 1076 |
+
columns="C",
|
| 1077 |
+
margins=True,
|
| 1078 |
+
aggfunc=len,
|
| 1079 |
+
fill_value=0,
|
| 1080 |
+
)
|
| 1081 |
+
|
| 1082 |
+
tm.assert_frame_equal(expected, result)
|
| 1083 |
+
|
| 1084 |
+
@pytest.mark.parametrize("cols", [(1, 2), ("a", "b"), (1, "b"), ("a", 1)])
|
| 1085 |
+
def test_pivot_table_multiindex_only(self, cols):
|
| 1086 |
+
# GH 17038
|
| 1087 |
+
df2 = DataFrame({cols[0]: [1, 2, 3], cols[1]: [1, 2, 3], "v": [4, 5, 6]})
|
| 1088 |
+
|
| 1089 |
+
result = df2.pivot_table(values="v", columns=cols)
|
| 1090 |
+
expected = DataFrame(
|
| 1091 |
+
[[4.0, 5.0, 6.0]],
|
| 1092 |
+
columns=MultiIndex.from_tuples([(1, 1), (2, 2), (3, 3)], names=cols),
|
| 1093 |
+
index=Index(["v"], dtype=object),
|
| 1094 |
+
)
|
| 1095 |
+
|
| 1096 |
+
tm.assert_frame_equal(result, expected)
|
| 1097 |
+
|
| 1098 |
+
def test_pivot_table_retains_tz(self):
|
| 1099 |
+
dti = date_range("2016-01-01", periods=3, tz="Europe/Amsterdam")
|
| 1100 |
+
df = DataFrame(
|
| 1101 |
+
{
|
| 1102 |
+
"A": np.random.default_rng(2).standard_normal(3),
|
| 1103 |
+
"B": np.random.default_rng(2).standard_normal(3),
|
| 1104 |
+
"C": dti,
|
| 1105 |
+
}
|
| 1106 |
+
)
|
| 1107 |
+
result = df.pivot_table(index=["B", "C"], dropna=False)
|
| 1108 |
+
|
| 1109 |
+
# check tz retention
|
| 1110 |
+
assert result.index.levels[1].equals(dti)
|
| 1111 |
+
|
| 1112 |
+
def test_pivot_integer_columns(self):
|
| 1113 |
+
# caused by upstream bug in unstack
|
| 1114 |
+
|
| 1115 |
+
d = date.min
|
| 1116 |
+
data = list(
|
| 1117 |
+
product(
|
| 1118 |
+
["foo", "bar"],
|
| 1119 |
+
["A", "B", "C"],
|
| 1120 |
+
["x1", "x2"],
|
| 1121 |
+
[d + timedelta(i) for i in range(20)],
|
| 1122 |
+
[1.0],
|
| 1123 |
+
)
|
| 1124 |
+
)
|
| 1125 |
+
df = DataFrame(data)
|
| 1126 |
+
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
|
| 1127 |
+
|
| 1128 |
+
df2 = df.rename(columns=str)
|
| 1129 |
+
table2 = df2.pivot_table(values="4", index=["0", "1", "3"], columns=["2"])
|
| 1130 |
+
|
| 1131 |
+
tm.assert_frame_equal(table, table2, check_names=False)
|
| 1132 |
+
|
| 1133 |
+
def test_pivot_no_level_overlap(self):
|
| 1134 |
+
# GH #1181
|
| 1135 |
+
|
| 1136 |
+
data = DataFrame(
|
| 1137 |
+
{
|
| 1138 |
+
"a": ["a", "a", "a", "a", "b", "b", "b", "b"] * 2,
|
| 1139 |
+
"b": [0, 0, 0, 0, 1, 1, 1, 1] * 2,
|
| 1140 |
+
"c": (["foo"] * 4 + ["bar"] * 4) * 2,
|
| 1141 |
+
"value": np.random.default_rng(2).standard_normal(16),
|
| 1142 |
+
}
|
| 1143 |
+
)
|
| 1144 |
+
|
| 1145 |
+
table = data.pivot_table("value", index="a", columns=["b", "c"])
|
| 1146 |
+
|
| 1147 |
+
grouped = data.groupby(["a", "b", "c"])["value"].mean()
|
| 1148 |
+
expected = grouped.unstack("b").unstack("c").dropna(axis=1, how="all")
|
| 1149 |
+
tm.assert_frame_equal(table, expected)
|
| 1150 |
+
|
| 1151 |
+
def test_pivot_columns_lexsorted(self):
|
| 1152 |
+
n = 10000
|
| 1153 |
+
|
| 1154 |
+
dtype = np.dtype(
|
| 1155 |
+
[
|
| 1156 |
+
("Index", object),
|
| 1157 |
+
("Symbol", object),
|
| 1158 |
+
("Year", int),
|
| 1159 |
+
("Month", int),
|
| 1160 |
+
("Day", int),
|
| 1161 |
+
("Quantity", int),
|
| 1162 |
+
("Price", float),
|
| 1163 |
+
]
|
| 1164 |
+
)
|
| 1165 |
+
|
| 1166 |
+
products = np.array(
|
| 1167 |
+
[
|
| 1168 |
+
("SP500", "ADBE"),
|
| 1169 |
+
("SP500", "NVDA"),
|
| 1170 |
+
("SP500", "ORCL"),
|
| 1171 |
+
("NDQ100", "AAPL"),
|
| 1172 |
+
("NDQ100", "MSFT"),
|
| 1173 |
+
("NDQ100", "GOOG"),
|
| 1174 |
+
("FTSE", "DGE.L"),
|
| 1175 |
+
("FTSE", "TSCO.L"),
|
| 1176 |
+
("FTSE", "GSK.L"),
|
| 1177 |
+
],
|
| 1178 |
+
dtype=[("Index", object), ("Symbol", object)],
|
| 1179 |
+
)
|
| 1180 |
+
items = np.empty(n, dtype=dtype)
|
| 1181 |
+
iproduct = np.random.default_rng(2).integers(0, len(products), n)
|
| 1182 |
+
items["Index"] = products["Index"][iproduct]
|
| 1183 |
+
items["Symbol"] = products["Symbol"][iproduct]
|
| 1184 |
+
dr = date_range(date(2000, 1, 1), date(2010, 12, 31))
|
| 1185 |
+
dates = dr[np.random.default_rng(2).integers(0, len(dr), n)]
|
| 1186 |
+
items["Year"] = dates.year
|
| 1187 |
+
items["Month"] = dates.month
|
| 1188 |
+
items["Day"] = dates.day
|
| 1189 |
+
items["Price"] = np.random.default_rng(2).lognormal(4.0, 2.0, n)
|
| 1190 |
+
|
| 1191 |
+
df = DataFrame(items)
|
| 1192 |
+
|
| 1193 |
+
pivoted = df.pivot_table(
|
| 1194 |
+
"Price",
|
| 1195 |
+
index=["Month", "Day"],
|
| 1196 |
+
columns=["Index", "Symbol", "Year"],
|
| 1197 |
+
aggfunc="mean",
|
| 1198 |
+
)
|
| 1199 |
+
|
| 1200 |
+
assert pivoted.columns.is_monotonic_increasing
|
| 1201 |
+
|
| 1202 |
+
def test_pivot_complex_aggfunc(self, data):
|
| 1203 |
+
f = {"D": ["std"], "E": ["sum"]}
|
| 1204 |
+
expected = data.groupby(["A", "B"]).agg(f).unstack("B")
|
| 1205 |
+
result = data.pivot_table(index="A", columns="B", aggfunc=f)
|
| 1206 |
+
|
| 1207 |
+
tm.assert_frame_equal(result, expected)
|
| 1208 |
+
|
| 1209 |
+
def test_margins_no_values_no_cols(self, data):
|
| 1210 |
+
# Regression test on pivot table: no values or cols passed.
|
| 1211 |
+
result = data[["A", "B"]].pivot_table(
|
| 1212 |
+
index=["A", "B"], aggfunc=len, margins=True
|
| 1213 |
+
)
|
| 1214 |
+
result_list = result.tolist()
|
| 1215 |
+
assert sum(result_list[:-1]) == result_list[-1]
|
| 1216 |
+
|
| 1217 |
+
def test_margins_no_values_two_rows(self, data):
|
| 1218 |
+
# Regression test on pivot table: no values passed but rows are a
|
| 1219 |
+
# multi-index
|
| 1220 |
+
result = data[["A", "B", "C"]].pivot_table(
|
| 1221 |
+
index=["A", "B"], columns="C", aggfunc=len, margins=True
|
| 1222 |
+
)
|
| 1223 |
+
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
|
| 1224 |
+
|
| 1225 |
+
def test_margins_no_values_one_row_one_col(self, data):
|
| 1226 |
+
# Regression test on pivot table: no values passed but row and col
|
| 1227 |
+
# defined
|
| 1228 |
+
result = data[["A", "B"]].pivot_table(
|
| 1229 |
+
index="A", columns="B", aggfunc=len, margins=True
|
| 1230 |
+
)
|
| 1231 |
+
assert result.All.tolist() == [4.0, 7.0, 11.0]
|
| 1232 |
+
|
| 1233 |
+
def test_margins_no_values_two_row_two_cols(self, data):
|
| 1234 |
+
# Regression test on pivot table: no values passed but rows and cols
|
| 1235 |
+
# are multi-indexed
|
| 1236 |
+
data["D"] = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
|
| 1237 |
+
result = data[["A", "B", "C", "D"]].pivot_table(
|
| 1238 |
+
index=["A", "B"], columns=["C", "D"], aggfunc=len, margins=True
|
| 1239 |
+
)
|
| 1240 |
+
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
|
| 1241 |
+
|
| 1242 |
+
@pytest.mark.parametrize("margin_name", ["foo", "one", 666, None, ["a", "b"]])
|
| 1243 |
+
def test_pivot_table_with_margins_set_margin_name(self, margin_name, data):
|
| 1244 |
+
# see gh-3335
|
| 1245 |
+
msg = (
|
| 1246 |
+
f'Conflicting name "{margin_name}" in margins|'
|
| 1247 |
+
"margins_name argument must be a string"
|
| 1248 |
+
)
|
| 1249 |
+
with pytest.raises(ValueError, match=msg):
|
| 1250 |
+
# multi-index index
|
| 1251 |
+
pivot_table(
|
| 1252 |
+
data,
|
| 1253 |
+
values="D",
|
| 1254 |
+
index=["A", "B"],
|
| 1255 |
+
columns=["C"],
|
| 1256 |
+
margins=True,
|
| 1257 |
+
margins_name=margin_name,
|
| 1258 |
+
)
|
| 1259 |
+
with pytest.raises(ValueError, match=msg):
|
| 1260 |
+
# multi-index column
|
| 1261 |
+
pivot_table(
|
| 1262 |
+
data,
|
| 1263 |
+
values="D",
|
| 1264 |
+
index=["C"],
|
| 1265 |
+
columns=["A", "B"],
|
| 1266 |
+
margins=True,
|
| 1267 |
+
margins_name=margin_name,
|
| 1268 |
+
)
|
| 1269 |
+
with pytest.raises(ValueError, match=msg):
|
| 1270 |
+
# non-multi-index index/column
|
| 1271 |
+
pivot_table(
|
| 1272 |
+
data,
|
| 1273 |
+
values="D",
|
| 1274 |
+
index=["A"],
|
| 1275 |
+
columns=["B"],
|
| 1276 |
+
margins=True,
|
| 1277 |
+
margins_name=margin_name,
|
| 1278 |
+
)
|
| 1279 |
+
|
| 1280 |
+
def test_pivot_timegrouper(self, using_array_manager):
|
| 1281 |
+
df = DataFrame(
|
| 1282 |
+
{
|
| 1283 |
+
"Branch": "A A A A A A A B".split(),
|
| 1284 |
+
"Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
|
| 1285 |
+
"Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
|
| 1286 |
+
"Date": [
|
| 1287 |
+
datetime(2013, 1, 1),
|
| 1288 |
+
datetime(2013, 1, 1),
|
| 1289 |
+
datetime(2013, 10, 1),
|
| 1290 |
+
datetime(2013, 10, 2),
|
| 1291 |
+
datetime(2013, 10, 1),
|
| 1292 |
+
datetime(2013, 10, 2),
|
| 1293 |
+
datetime(2013, 12, 2),
|
| 1294 |
+
datetime(2013, 12, 2),
|
| 1295 |
+
],
|
| 1296 |
+
}
|
| 1297 |
+
).set_index("Date")
|
| 1298 |
+
|
| 1299 |
+
expected = DataFrame(
|
| 1300 |
+
np.array([10, 18, 3], dtype="int64").reshape(1, 3),
|
| 1301 |
+
index=pd.DatetimeIndex([datetime(2013, 12, 31)], freq="YE"),
|
| 1302 |
+
columns="Carl Joe Mark".split(),
|
| 1303 |
+
)
|
| 1304 |
+
expected.index.name = "Date"
|
| 1305 |
+
expected.columns.name = "Buyer"
|
| 1306 |
+
|
| 1307 |
+
result = pivot_table(
|
| 1308 |
+
df,
|
| 1309 |
+
index=Grouper(freq="YE"),
|
| 1310 |
+
columns="Buyer",
|
| 1311 |
+
values="Quantity",
|
| 1312 |
+
aggfunc="sum",
|
| 1313 |
+
)
|
| 1314 |
+
tm.assert_frame_equal(result, expected)
|
| 1315 |
+
|
| 1316 |
+
result = pivot_table(
|
| 1317 |
+
df,
|
| 1318 |
+
index="Buyer",
|
| 1319 |
+
columns=Grouper(freq="YE"),
|
| 1320 |
+
values="Quantity",
|
| 1321 |
+
aggfunc="sum",
|
| 1322 |
+
)
|
| 1323 |
+
tm.assert_frame_equal(result, expected.T)
|
| 1324 |
+
|
| 1325 |
+
expected = DataFrame(
|
| 1326 |
+
np.array([1, np.nan, 3, 9, 18, np.nan]).reshape(2, 3),
|
| 1327 |
+
index=pd.DatetimeIndex(
|
| 1328 |
+
[datetime(2013, 1, 1), datetime(2013, 7, 1)], freq="6MS"
|
| 1329 |
+
),
|
| 1330 |
+
columns="Carl Joe Mark".split(),
|
| 1331 |
+
)
|
| 1332 |
+
expected.index.name = "Date"
|
| 1333 |
+
expected.columns.name = "Buyer"
|
| 1334 |
+
if using_array_manager:
|
| 1335 |
+
# INFO(ArrayManager) column without NaNs can preserve int dtype
|
| 1336 |
+
expected["Carl"] = expected["Carl"].astype("int64")
|
| 1337 |
+
|
| 1338 |
+
result = pivot_table(
|
| 1339 |
+
df,
|
| 1340 |
+
index=Grouper(freq="6MS"),
|
| 1341 |
+
columns="Buyer",
|
| 1342 |
+
values="Quantity",
|
| 1343 |
+
aggfunc="sum",
|
| 1344 |
+
)
|
| 1345 |
+
tm.assert_frame_equal(result, expected)
|
| 1346 |
+
|
| 1347 |
+
result = pivot_table(
|
| 1348 |
+
df,
|
| 1349 |
+
index="Buyer",
|
| 1350 |
+
columns=Grouper(freq="6MS"),
|
| 1351 |
+
values="Quantity",
|
| 1352 |
+
aggfunc="sum",
|
| 1353 |
+
)
|
| 1354 |
+
tm.assert_frame_equal(result, expected.T)
|
| 1355 |
+
|
| 1356 |
+
# passing the name
|
| 1357 |
+
df = df.reset_index()
|
| 1358 |
+
result = pivot_table(
|
| 1359 |
+
df,
|
| 1360 |
+
index=Grouper(freq="6MS", key="Date"),
|
| 1361 |
+
columns="Buyer",
|
| 1362 |
+
values="Quantity",
|
| 1363 |
+
aggfunc="sum",
|
| 1364 |
+
)
|
| 1365 |
+
tm.assert_frame_equal(result, expected)
|
| 1366 |
+
|
| 1367 |
+
result = pivot_table(
|
| 1368 |
+
df,
|
| 1369 |
+
index="Buyer",
|
| 1370 |
+
columns=Grouper(freq="6MS", key="Date"),
|
| 1371 |
+
values="Quantity",
|
| 1372 |
+
aggfunc="sum",
|
| 1373 |
+
)
|
| 1374 |
+
tm.assert_frame_equal(result, expected.T)
|
| 1375 |
+
|
| 1376 |
+
msg = "'The grouper name foo is not found'"
|
| 1377 |
+
with pytest.raises(KeyError, match=msg):
|
| 1378 |
+
pivot_table(
|
| 1379 |
+
df,
|
| 1380 |
+
index=Grouper(freq="6MS", key="foo"),
|
| 1381 |
+
columns="Buyer",
|
| 1382 |
+
values="Quantity",
|
| 1383 |
+
aggfunc="sum",
|
| 1384 |
+
)
|
| 1385 |
+
with pytest.raises(KeyError, match=msg):
|
| 1386 |
+
pivot_table(
|
| 1387 |
+
df,
|
| 1388 |
+
index="Buyer",
|
| 1389 |
+
columns=Grouper(freq="6MS", key="foo"),
|
| 1390 |
+
values="Quantity",
|
| 1391 |
+
aggfunc="sum",
|
| 1392 |
+
)
|
| 1393 |
+
|
| 1394 |
+
# passing the level
|
| 1395 |
+
df = df.set_index("Date")
|
| 1396 |
+
result = pivot_table(
|
| 1397 |
+
df,
|
| 1398 |
+
index=Grouper(freq="6MS", level="Date"),
|
| 1399 |
+
columns="Buyer",
|
| 1400 |
+
values="Quantity",
|
| 1401 |
+
aggfunc="sum",
|
| 1402 |
+
)
|
| 1403 |
+
tm.assert_frame_equal(result, expected)
|
| 1404 |
+
|
| 1405 |
+
result = pivot_table(
|
| 1406 |
+
df,
|
| 1407 |
+
index="Buyer",
|
| 1408 |
+
columns=Grouper(freq="6MS", level="Date"),
|
| 1409 |
+
values="Quantity",
|
| 1410 |
+
aggfunc="sum",
|
| 1411 |
+
)
|
| 1412 |
+
tm.assert_frame_equal(result, expected.T)
|
| 1413 |
+
|
| 1414 |
+
msg = "The level foo is not valid"
|
| 1415 |
+
with pytest.raises(ValueError, match=msg):
|
| 1416 |
+
pivot_table(
|
| 1417 |
+
df,
|
| 1418 |
+
index=Grouper(freq="6MS", level="foo"),
|
| 1419 |
+
columns="Buyer",
|
| 1420 |
+
values="Quantity",
|
| 1421 |
+
aggfunc="sum",
|
| 1422 |
+
)
|
| 1423 |
+
with pytest.raises(ValueError, match=msg):
|
| 1424 |
+
pivot_table(
|
| 1425 |
+
df,
|
| 1426 |
+
index="Buyer",
|
| 1427 |
+
columns=Grouper(freq="6MS", level="foo"),
|
| 1428 |
+
values="Quantity",
|
| 1429 |
+
aggfunc="sum",
|
| 1430 |
+
)
|
| 1431 |
+
|
| 1432 |
+
def test_pivot_timegrouper_double(self):
|
| 1433 |
+
# double grouper
|
| 1434 |
+
df = DataFrame(
|
| 1435 |
+
{
|
| 1436 |
+
"Branch": "A A A A A A A B".split(),
|
| 1437 |
+
"Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
|
| 1438 |
+
"Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
|
| 1439 |
+
"Date": [
|
| 1440 |
+
datetime(2013, 11, 1, 13, 0),
|
| 1441 |
+
datetime(2013, 9, 1, 13, 5),
|
| 1442 |
+
datetime(2013, 10, 1, 20, 0),
|
| 1443 |
+
datetime(2013, 10, 2, 10, 0),
|
| 1444 |
+
datetime(2013, 11, 1, 20, 0),
|
| 1445 |
+
datetime(2013, 10, 2, 10, 0),
|
| 1446 |
+
datetime(2013, 10, 2, 12, 0),
|
| 1447 |
+
datetime(2013, 12, 5, 14, 0),
|
| 1448 |
+
],
|
| 1449 |
+
"PayDay": [
|
| 1450 |
+
datetime(2013, 10, 4, 0, 0),
|
| 1451 |
+
datetime(2013, 10, 15, 13, 5),
|
| 1452 |
+
datetime(2013, 9, 5, 20, 0),
|
| 1453 |
+
datetime(2013, 11, 2, 10, 0),
|
| 1454 |
+
datetime(2013, 10, 7, 20, 0),
|
| 1455 |
+
datetime(2013, 9, 5, 10, 0),
|
| 1456 |
+
datetime(2013, 12, 30, 12, 0),
|
| 1457 |
+
datetime(2013, 11, 20, 14, 0),
|
| 1458 |
+
],
|
| 1459 |
+
}
|
| 1460 |
+
)
|
| 1461 |
+
|
| 1462 |
+
result = pivot_table(
|
| 1463 |
+
df,
|
| 1464 |
+
index=Grouper(freq="ME", key="Date"),
|
| 1465 |
+
columns=Grouper(freq="ME", key="PayDay"),
|
| 1466 |
+
values="Quantity",
|
| 1467 |
+
aggfunc="sum",
|
| 1468 |
+
)
|
| 1469 |
+
expected = DataFrame(
|
| 1470 |
+
np.array(
|
| 1471 |
+
[
|
| 1472 |
+
np.nan,
|
| 1473 |
+
3,
|
| 1474 |
+
np.nan,
|
| 1475 |
+
np.nan,
|
| 1476 |
+
6,
|
| 1477 |
+
np.nan,
|
| 1478 |
+
1,
|
| 1479 |
+
9,
|
| 1480 |
+
np.nan,
|
| 1481 |
+
9,
|
| 1482 |
+
np.nan,
|
| 1483 |
+
np.nan,
|
| 1484 |
+
np.nan,
|
| 1485 |
+
np.nan,
|
| 1486 |
+
3,
|
| 1487 |
+
np.nan,
|
| 1488 |
+
]
|
| 1489 |
+
).reshape(4, 4),
|
| 1490 |
+
index=pd.DatetimeIndex(
|
| 1491 |
+
[
|
| 1492 |
+
datetime(2013, 9, 30),
|
| 1493 |
+
datetime(2013, 10, 31),
|
| 1494 |
+
datetime(2013, 11, 30),
|
| 1495 |
+
datetime(2013, 12, 31),
|
| 1496 |
+
],
|
| 1497 |
+
freq="ME",
|
| 1498 |
+
),
|
| 1499 |
+
columns=pd.DatetimeIndex(
|
| 1500 |
+
[
|
| 1501 |
+
datetime(2013, 9, 30),
|
| 1502 |
+
datetime(2013, 10, 31),
|
| 1503 |
+
datetime(2013, 11, 30),
|
| 1504 |
+
datetime(2013, 12, 31),
|
| 1505 |
+
],
|
| 1506 |
+
freq="ME",
|
| 1507 |
+
),
|
| 1508 |
+
)
|
| 1509 |
+
expected.index.name = "Date"
|
| 1510 |
+
expected.columns.name = "PayDay"
|
| 1511 |
+
|
| 1512 |
+
tm.assert_frame_equal(result, expected)
|
| 1513 |
+
|
| 1514 |
+
result = pivot_table(
|
| 1515 |
+
df,
|
| 1516 |
+
index=Grouper(freq="ME", key="PayDay"),
|
| 1517 |
+
columns=Grouper(freq="ME", key="Date"),
|
| 1518 |
+
values="Quantity",
|
| 1519 |
+
aggfunc="sum",
|
| 1520 |
+
)
|
| 1521 |
+
tm.assert_frame_equal(result, expected.T)
|
| 1522 |
+
|
| 1523 |
+
tuples = [
|
| 1524 |
+
(datetime(2013, 9, 30), datetime(2013, 10, 31)),
|
| 1525 |
+
(datetime(2013, 10, 31), datetime(2013, 9, 30)),
|
| 1526 |
+
(datetime(2013, 10, 31), datetime(2013, 11, 30)),
|
| 1527 |
+
(datetime(2013, 10, 31), datetime(2013, 12, 31)),
|
| 1528 |
+
(datetime(2013, 11, 30), datetime(2013, 10, 31)),
|
| 1529 |
+
(datetime(2013, 12, 31), datetime(2013, 11, 30)),
|
| 1530 |
+
]
|
| 1531 |
+
idx = MultiIndex.from_tuples(tuples, names=["Date", "PayDay"])
|
| 1532 |
+
expected = DataFrame(
|
| 1533 |
+
np.array(
|
| 1534 |
+
[3, np.nan, 6, np.nan, 1, np.nan, 9, np.nan, 9, np.nan, np.nan, 3]
|
| 1535 |
+
).reshape(6, 2),
|
| 1536 |
+
index=idx,
|
| 1537 |
+
columns=["A", "B"],
|
| 1538 |
+
)
|
| 1539 |
+
expected.columns.name = "Branch"
|
| 1540 |
+
|
| 1541 |
+
result = pivot_table(
|
| 1542 |
+
df,
|
| 1543 |
+
index=[Grouper(freq="ME", key="Date"), Grouper(freq="ME", key="PayDay")],
|
| 1544 |
+
columns=["Branch"],
|
| 1545 |
+
values="Quantity",
|
| 1546 |
+
aggfunc="sum",
|
| 1547 |
+
)
|
| 1548 |
+
tm.assert_frame_equal(result, expected)
|
| 1549 |
+
|
| 1550 |
+
result = pivot_table(
|
| 1551 |
+
df,
|
| 1552 |
+
index=["Branch"],
|
| 1553 |
+
columns=[Grouper(freq="ME", key="Date"), Grouper(freq="ME", key="PayDay")],
|
| 1554 |
+
values="Quantity",
|
| 1555 |
+
aggfunc="sum",
|
| 1556 |
+
)
|
| 1557 |
+
tm.assert_frame_equal(result, expected.T)
|
| 1558 |
+
|
| 1559 |
+
def test_pivot_datetime_tz(self):
|
| 1560 |
+
dates1 = pd.DatetimeIndex(
|
| 1561 |
+
[
|
| 1562 |
+
"2011-07-19 07:00:00",
|
| 1563 |
+
"2011-07-19 08:00:00",
|
| 1564 |
+
"2011-07-19 09:00:00",
|
| 1565 |
+
"2011-07-19 07:00:00",
|
| 1566 |
+
"2011-07-19 08:00:00",
|
| 1567 |
+
"2011-07-19 09:00:00",
|
| 1568 |
+
],
|
| 1569 |
+
dtype="M8[ns, US/Pacific]",
|
| 1570 |
+
name="dt1",
|
| 1571 |
+
)
|
| 1572 |
+
dates2 = pd.DatetimeIndex(
|
| 1573 |
+
[
|
| 1574 |
+
"2013-01-01 15:00:00",
|
| 1575 |
+
"2013-01-01 15:00:00",
|
| 1576 |
+
"2013-01-01 15:00:00",
|
| 1577 |
+
"2013-02-01 15:00:00",
|
| 1578 |
+
"2013-02-01 15:00:00",
|
| 1579 |
+
"2013-02-01 15:00:00",
|
| 1580 |
+
],
|
| 1581 |
+
dtype="M8[ns, Asia/Tokyo]",
|
| 1582 |
+
)
|
| 1583 |
+
df = DataFrame(
|
| 1584 |
+
{
|
| 1585 |
+
"label": ["a", "a", "a", "b", "b", "b"],
|
| 1586 |
+
"dt1": dates1,
|
| 1587 |
+
"dt2": dates2,
|
| 1588 |
+
"value1": np.arange(6, dtype="int64"),
|
| 1589 |
+
"value2": [1, 2] * 3,
|
| 1590 |
+
}
|
| 1591 |
+
)
|
| 1592 |
+
|
| 1593 |
+
exp_idx = dates1[:3]
|
| 1594 |
+
exp_col1 = Index(["value1", "value1"])
|
| 1595 |
+
exp_col2 = Index(["a", "b"], name="label")
|
| 1596 |
+
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
|
| 1597 |
+
expected = DataFrame(
|
| 1598 |
+
[[0.0, 3.0], [1.0, 4.0], [2.0, 5.0]], index=exp_idx, columns=exp_col
|
| 1599 |
+
)
|
| 1600 |
+
result = pivot_table(df, index=["dt1"], columns=["label"], values=["value1"])
|
| 1601 |
+
tm.assert_frame_equal(result, expected)
|
| 1602 |
+
|
| 1603 |
+
exp_col1 = Index(["sum", "sum", "sum", "sum", "mean", "mean", "mean", "mean"])
|
| 1604 |
+
exp_col2 = Index(["value1", "value1", "value2", "value2"] * 2)
|
| 1605 |
+
exp_col3 = pd.DatetimeIndex(
|
| 1606 |
+
["2013-01-01 15:00:00", "2013-02-01 15:00:00"] * 4,
|
| 1607 |
+
dtype="M8[ns, Asia/Tokyo]",
|
| 1608 |
+
name="dt2",
|
| 1609 |
+
)
|
| 1610 |
+
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
|
| 1611 |
+
expected1 = DataFrame(
|
| 1612 |
+
np.array(
|
| 1613 |
+
[
|
| 1614 |
+
[
|
| 1615 |
+
0,
|
| 1616 |
+
3,
|
| 1617 |
+
1,
|
| 1618 |
+
2,
|
| 1619 |
+
],
|
| 1620 |
+
[1, 4, 2, 1],
|
| 1621 |
+
[2, 5, 1, 2],
|
| 1622 |
+
],
|
| 1623 |
+
dtype="int64",
|
| 1624 |
+
),
|
| 1625 |
+
index=exp_idx,
|
| 1626 |
+
columns=exp_col[:4],
|
| 1627 |
+
)
|
| 1628 |
+
expected2 = DataFrame(
|
| 1629 |
+
np.array(
|
| 1630 |
+
[
|
| 1631 |
+
[0.0, 3.0, 1.0, 2.0],
|
| 1632 |
+
[1.0, 4.0, 2.0, 1.0],
|
| 1633 |
+
[2.0, 5.0, 1.0, 2.0],
|
| 1634 |
+
],
|
| 1635 |
+
),
|
| 1636 |
+
index=exp_idx,
|
| 1637 |
+
columns=exp_col[4:],
|
| 1638 |
+
)
|
| 1639 |
+
expected = concat([expected1, expected2], axis=1)
|
| 1640 |
+
|
| 1641 |
+
result = pivot_table(
|
| 1642 |
+
df,
|
| 1643 |
+
index=["dt1"],
|
| 1644 |
+
columns=["dt2"],
|
| 1645 |
+
values=["value1", "value2"],
|
| 1646 |
+
aggfunc=["sum", "mean"],
|
| 1647 |
+
)
|
| 1648 |
+
tm.assert_frame_equal(result, expected)
|
| 1649 |
+
|
| 1650 |
+
def test_pivot_dtaccessor(self):
|
| 1651 |
+
# GH 8103
|
| 1652 |
+
dates1 = pd.DatetimeIndex(
|
| 1653 |
+
[
|
| 1654 |
+
"2011-07-19 07:00:00",
|
| 1655 |
+
"2011-07-19 08:00:00",
|
| 1656 |
+
"2011-07-19 09:00:00",
|
| 1657 |
+
"2011-07-19 07:00:00",
|
| 1658 |
+
"2011-07-19 08:00:00",
|
| 1659 |
+
"2011-07-19 09:00:00",
|
| 1660 |
+
]
|
| 1661 |
+
)
|
| 1662 |
+
dates2 = pd.DatetimeIndex(
|
| 1663 |
+
[
|
| 1664 |
+
"2013-01-01 15:00:00",
|
| 1665 |
+
"2013-01-01 15:00:00",
|
| 1666 |
+
"2013-01-01 15:00:00",
|
| 1667 |
+
"2013-02-01 15:00:00",
|
| 1668 |
+
"2013-02-01 15:00:00",
|
| 1669 |
+
"2013-02-01 15:00:00",
|
| 1670 |
+
]
|
| 1671 |
+
)
|
| 1672 |
+
df = DataFrame(
|
| 1673 |
+
{
|
| 1674 |
+
"label": ["a", "a", "a", "b", "b", "b"],
|
| 1675 |
+
"dt1": dates1,
|
| 1676 |
+
"dt2": dates2,
|
| 1677 |
+
"value1": np.arange(6, dtype="int64"),
|
| 1678 |
+
"value2": [1, 2] * 3,
|
| 1679 |
+
}
|
| 1680 |
+
)
|
| 1681 |
+
|
| 1682 |
+
result = pivot_table(
|
| 1683 |
+
df, index="label", columns=df["dt1"].dt.hour, values="value1"
|
| 1684 |
+
)
|
| 1685 |
+
|
| 1686 |
+
exp_idx = Index(["a", "b"], name="label")
|
| 1687 |
+
expected = DataFrame(
|
| 1688 |
+
{7: [0.0, 3.0], 8: [1.0, 4.0], 9: [2.0, 5.0]},
|
| 1689 |
+
index=exp_idx,
|
| 1690 |
+
columns=Index([7, 8, 9], dtype=np.int32, name="dt1"),
|
| 1691 |
+
)
|
| 1692 |
+
tm.assert_frame_equal(result, expected)
|
| 1693 |
+
|
| 1694 |
+
result = pivot_table(
|
| 1695 |
+
df, index=df["dt2"].dt.month, columns=df["dt1"].dt.hour, values="value1"
|
| 1696 |
+
)
|
| 1697 |
+
|
| 1698 |
+
expected = DataFrame(
|
| 1699 |
+
{7: [0.0, 3.0], 8: [1.0, 4.0], 9: [2.0, 5.0]},
|
| 1700 |
+
index=Index([1, 2], dtype=np.int32, name="dt2"),
|
| 1701 |
+
columns=Index([7, 8, 9], dtype=np.int32, name="dt1"),
|
| 1702 |
+
)
|
| 1703 |
+
tm.assert_frame_equal(result, expected)
|
| 1704 |
+
|
| 1705 |
+
result = pivot_table(
|
| 1706 |
+
df,
|
| 1707 |
+
index=df["dt2"].dt.year.values,
|
| 1708 |
+
columns=[df["dt1"].dt.hour, df["dt2"].dt.month],
|
| 1709 |
+
values="value1",
|
| 1710 |
+
)
|
| 1711 |
+
|
| 1712 |
+
exp_col = MultiIndex.from_arrays(
|
| 1713 |
+
[
|
| 1714 |
+
np.array([7, 7, 8, 8, 9, 9], dtype=np.int32),
|
| 1715 |
+
np.array([1, 2] * 3, dtype=np.int32),
|
| 1716 |
+
],
|
| 1717 |
+
names=["dt1", "dt2"],
|
| 1718 |
+
)
|
| 1719 |
+
expected = DataFrame(
|
| 1720 |
+
np.array([[0.0, 3.0, 1.0, 4.0, 2.0, 5.0]]),
|
| 1721 |
+
index=Index([2013], dtype=np.int32),
|
| 1722 |
+
columns=exp_col,
|
| 1723 |
+
)
|
| 1724 |
+
tm.assert_frame_equal(result, expected)
|
| 1725 |
+
|
| 1726 |
+
result = pivot_table(
|
| 1727 |
+
df,
|
| 1728 |
+
index=np.array(["X", "X", "X", "X", "Y", "Y"]),
|
| 1729 |
+
columns=[df["dt1"].dt.hour, df["dt2"].dt.month],
|
| 1730 |
+
values="value1",
|
| 1731 |
+
)
|
| 1732 |
+
expected = DataFrame(
|
| 1733 |
+
np.array(
|
| 1734 |
+
[[0, 3, 1, np.nan, 2, np.nan], [np.nan, np.nan, np.nan, 4, np.nan, 5]]
|
| 1735 |
+
),
|
| 1736 |
+
index=["X", "Y"],
|
| 1737 |
+
columns=exp_col,
|
| 1738 |
+
)
|
| 1739 |
+
tm.assert_frame_equal(result, expected)
|
| 1740 |
+
|
| 1741 |
+
def test_daily(self):
|
| 1742 |
+
rng = date_range("1/1/2000", "12/31/2004", freq="D")
|
| 1743 |
+
ts = Series(np.arange(len(rng)), index=rng)
|
| 1744 |
+
|
| 1745 |
+
result = pivot_table(
|
| 1746 |
+
DataFrame(ts), index=ts.index.year, columns=ts.index.dayofyear
|
| 1747 |
+
)
|
| 1748 |
+
result.columns = result.columns.droplevel(0)
|
| 1749 |
+
|
| 1750 |
+
doy = np.asarray(ts.index.dayofyear)
|
| 1751 |
+
|
| 1752 |
+
expected = {}
|
| 1753 |
+
for y in ts.index.year.unique().values:
|
| 1754 |
+
mask = ts.index.year == y
|
| 1755 |
+
expected[y] = Series(ts.values[mask], index=doy[mask])
|
| 1756 |
+
expected = DataFrame(expected, dtype=float).T
|
| 1757 |
+
tm.assert_frame_equal(result, expected)
|
| 1758 |
+
|
| 1759 |
+
def test_monthly(self):
|
| 1760 |
+
rng = date_range("1/1/2000", "12/31/2004", freq="ME")
|
| 1761 |
+
ts = Series(np.arange(len(rng)), index=rng)
|
| 1762 |
+
|
| 1763 |
+
result = pivot_table(DataFrame(ts), index=ts.index.year, columns=ts.index.month)
|
| 1764 |
+
result.columns = result.columns.droplevel(0)
|
| 1765 |
+
|
| 1766 |
+
month = np.asarray(ts.index.month)
|
| 1767 |
+
expected = {}
|
| 1768 |
+
for y in ts.index.year.unique().values:
|
| 1769 |
+
mask = ts.index.year == y
|
| 1770 |
+
expected[y] = Series(ts.values[mask], index=month[mask])
|
| 1771 |
+
expected = DataFrame(expected, dtype=float).T
|
| 1772 |
+
tm.assert_frame_equal(result, expected)
|
| 1773 |
+
|
| 1774 |
+
def test_pivot_table_with_iterator_values(self, data):
|
| 1775 |
+
# GH 12017
|
| 1776 |
+
aggs = {"D": "sum", "E": "mean"}
|
| 1777 |
+
|
| 1778 |
+
pivot_values_list = pivot_table(
|
| 1779 |
+
data, index=["A"], values=list(aggs.keys()), aggfunc=aggs
|
| 1780 |
+
)
|
| 1781 |
+
|
| 1782 |
+
pivot_values_keys = pivot_table(
|
| 1783 |
+
data, index=["A"], values=aggs.keys(), aggfunc=aggs
|
| 1784 |
+
)
|
| 1785 |
+
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
|
| 1786 |
+
|
| 1787 |
+
agg_values_gen = (value for value in aggs)
|
| 1788 |
+
pivot_values_gen = pivot_table(
|
| 1789 |
+
data, index=["A"], values=agg_values_gen, aggfunc=aggs
|
| 1790 |
+
)
|
| 1791 |
+
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
|
| 1792 |
+
|
| 1793 |
+
def test_pivot_table_margins_name_with_aggfunc_list(self):
|
| 1794 |
+
# GH 13354
|
| 1795 |
+
margins_name = "Weekly"
|
| 1796 |
+
costs = DataFrame(
|
| 1797 |
+
{
|
| 1798 |
+
"item": ["bacon", "cheese", "bacon", "cheese"],
|
| 1799 |
+
"cost": [2.5, 4.5, 3.2, 3.3],
|
| 1800 |
+
"day": ["ME", "ME", "T", "T"],
|
| 1801 |
+
}
|
| 1802 |
+
)
|
| 1803 |
+
table = costs.pivot_table(
|
| 1804 |
+
index="item",
|
| 1805 |
+
columns="day",
|
| 1806 |
+
margins=True,
|
| 1807 |
+
margins_name=margins_name,
|
| 1808 |
+
aggfunc=["mean", "max"],
|
| 1809 |
+
)
|
| 1810 |
+
ix = Index(["bacon", "cheese", margins_name], name="item")
|
| 1811 |
+
tups = [
|
| 1812 |
+
("mean", "cost", "ME"),
|
| 1813 |
+
("mean", "cost", "T"),
|
| 1814 |
+
("mean", "cost", margins_name),
|
| 1815 |
+
("max", "cost", "ME"),
|
| 1816 |
+
("max", "cost", "T"),
|
| 1817 |
+
("max", "cost", margins_name),
|
| 1818 |
+
]
|
| 1819 |
+
cols = MultiIndex.from_tuples(tups, names=[None, None, "day"])
|
| 1820 |
+
expected = DataFrame(table.values, index=ix, columns=cols)
|
| 1821 |
+
tm.assert_frame_equal(table, expected)
|
| 1822 |
+
|
| 1823 |
+
def test_categorical_margins(self, observed):
|
| 1824 |
+
# GH 10989
|
| 1825 |
+
df = DataFrame(
|
| 1826 |
+
{"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2}
|
| 1827 |
+
)
|
| 1828 |
+
|
| 1829 |
+
expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
|
| 1830 |
+
expected.index = Index([0, 1, "All"], name="y")
|
| 1831 |
+
expected.columns = Index([0, 1, "All"], name="z")
|
| 1832 |
+
|
| 1833 |
+
table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
|
| 1834 |
+
tm.assert_frame_equal(table, expected)
|
| 1835 |
+
|
| 1836 |
+
def test_categorical_margins_category(self, observed):
|
| 1837 |
+
df = DataFrame(
|
| 1838 |
+
{"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2}
|
| 1839 |
+
)
|
| 1840 |
+
|
| 1841 |
+
expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
|
| 1842 |
+
expected.index = Index([0, 1, "All"], name="y")
|
| 1843 |
+
expected.columns = Index([0, 1, "All"], name="z")
|
| 1844 |
+
|
| 1845 |
+
df.y = df.y.astype("category")
|
| 1846 |
+
df.z = df.z.astype("category")
|
| 1847 |
+
msg = "The default value of observed=False is deprecated"
|
| 1848 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 1849 |
+
table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
|
| 1850 |
+
tm.assert_frame_equal(table, expected)
|
| 1851 |
+
|
| 1852 |
+
def test_margins_casted_to_float(self):
|
| 1853 |
+
# GH 24893
|
| 1854 |
+
df = DataFrame(
|
| 1855 |
+
{
|
| 1856 |
+
"A": [2, 4, 6, 8],
|
| 1857 |
+
"B": [1, 4, 5, 8],
|
| 1858 |
+
"C": [1, 3, 4, 6],
|
| 1859 |
+
"D": ["X", "X", "Y", "Y"],
|
| 1860 |
+
}
|
| 1861 |
+
)
|
| 1862 |
+
|
| 1863 |
+
result = pivot_table(df, index="D", margins=True)
|
| 1864 |
+
expected = DataFrame(
|
| 1865 |
+
{"A": [3.0, 7.0, 5], "B": [2.5, 6.5, 4.5], "C": [2.0, 5.0, 3.5]},
|
| 1866 |
+
index=Index(["X", "Y", "All"], name="D"),
|
| 1867 |
+
)
|
| 1868 |
+
tm.assert_frame_equal(result, expected)
|
| 1869 |
+
|
| 1870 |
+
def test_pivot_with_categorical(self, observed, ordered):
|
| 1871 |
+
# gh-21370
|
| 1872 |
+
idx = [np.nan, "low", "high", "low", np.nan]
|
| 1873 |
+
col = [np.nan, "A", "B", np.nan, "A"]
|
| 1874 |
+
df = DataFrame(
|
| 1875 |
+
{
|
| 1876 |
+
"In": Categorical(idx, categories=["low", "high"], ordered=ordered),
|
| 1877 |
+
"Col": Categorical(col, categories=["A", "B"], ordered=ordered),
|
| 1878 |
+
"Val": range(1, 6),
|
| 1879 |
+
}
|
| 1880 |
+
)
|
| 1881 |
+
# case with index/columns/value
|
| 1882 |
+
result = df.pivot_table(
|
| 1883 |
+
index="In", columns="Col", values="Val", observed=observed
|
| 1884 |
+
)
|
| 1885 |
+
|
| 1886 |
+
expected_cols = pd.CategoricalIndex(["A", "B"], ordered=ordered, name="Col")
|
| 1887 |
+
|
| 1888 |
+
expected = DataFrame(data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols)
|
| 1889 |
+
expected.index = Index(
|
| 1890 |
+
Categorical(["low", "high"], categories=["low", "high"], ordered=ordered),
|
| 1891 |
+
name="In",
|
| 1892 |
+
)
|
| 1893 |
+
|
| 1894 |
+
tm.assert_frame_equal(result, expected)
|
| 1895 |
+
|
| 1896 |
+
# case with columns/value
|
| 1897 |
+
result = df.pivot_table(columns="Col", values="Val", observed=observed)
|
| 1898 |
+
|
| 1899 |
+
expected = DataFrame(
|
| 1900 |
+
data=[[3.5, 3.0]], columns=expected_cols, index=Index(["Val"])
|
| 1901 |
+
)
|
| 1902 |
+
|
| 1903 |
+
tm.assert_frame_equal(result, expected)
|
| 1904 |
+
|
| 1905 |
+
def test_categorical_aggfunc(self, observed):
|
| 1906 |
+
# GH 9534
|
| 1907 |
+
df = DataFrame(
|
| 1908 |
+
{"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]}
|
| 1909 |
+
)
|
| 1910 |
+
df["C1"] = df["C1"].astype("category")
|
| 1911 |
+
msg = "The default value of observed=False is deprecated"
|
| 1912 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 1913 |
+
result = df.pivot_table(
|
| 1914 |
+
"V", index="C1", columns="C2", dropna=observed, aggfunc="count"
|
| 1915 |
+
)
|
| 1916 |
+
|
| 1917 |
+
expected_index = pd.CategoricalIndex(
|
| 1918 |
+
["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1"
|
| 1919 |
+
)
|
| 1920 |
+
expected_columns = Index(["a", "b"], name="C2")
|
| 1921 |
+
expected_data = np.array([[1, 0], [1, 0], [0, 2]], dtype=np.int64)
|
| 1922 |
+
expected = DataFrame(
|
| 1923 |
+
expected_data, index=expected_index, columns=expected_columns
|
| 1924 |
+
)
|
| 1925 |
+
tm.assert_frame_equal(result, expected)
|
| 1926 |
+
|
| 1927 |
+
def test_categorical_pivot_index_ordering(self, observed):
|
| 1928 |
+
# GH 8731
|
| 1929 |
+
df = DataFrame(
|
| 1930 |
+
{
|
| 1931 |
+
"Sales": [100, 120, 220],
|
| 1932 |
+
"Month": ["January", "January", "January"],
|
| 1933 |
+
"Year": [2013, 2014, 2013],
|
| 1934 |
+
}
|
| 1935 |
+
)
|
| 1936 |
+
months = [
|
| 1937 |
+
"January",
|
| 1938 |
+
"February",
|
| 1939 |
+
"March",
|
| 1940 |
+
"April",
|
| 1941 |
+
"May",
|
| 1942 |
+
"June",
|
| 1943 |
+
"July",
|
| 1944 |
+
"August",
|
| 1945 |
+
"September",
|
| 1946 |
+
"October",
|
| 1947 |
+
"November",
|
| 1948 |
+
"December",
|
| 1949 |
+
]
|
| 1950 |
+
df["Month"] = df["Month"].astype("category").cat.set_categories(months)
|
| 1951 |
+
result = df.pivot_table(
|
| 1952 |
+
values="Sales",
|
| 1953 |
+
index="Month",
|
| 1954 |
+
columns="Year",
|
| 1955 |
+
observed=observed,
|
| 1956 |
+
aggfunc="sum",
|
| 1957 |
+
)
|
| 1958 |
+
expected_columns = Index([2013, 2014], name="Year", dtype="int64")
|
| 1959 |
+
expected_index = pd.CategoricalIndex(
|
| 1960 |
+
months, categories=months, ordered=False, name="Month"
|
| 1961 |
+
)
|
| 1962 |
+
expected_data = [[320, 120]] + [[0, 0]] * 11
|
| 1963 |
+
expected = DataFrame(
|
| 1964 |
+
expected_data, index=expected_index, columns=expected_columns
|
| 1965 |
+
)
|
| 1966 |
+
if observed:
|
| 1967 |
+
expected = expected.loc[["January"]]
|
| 1968 |
+
|
| 1969 |
+
tm.assert_frame_equal(result, expected)
|
| 1970 |
+
|
| 1971 |
+
def test_pivot_table_not_series(self):
|
| 1972 |
+
# GH 4386
|
| 1973 |
+
# pivot_table always returns a DataFrame
|
| 1974 |
+
# when values is not list like and columns is None
|
| 1975 |
+
# and aggfunc is not instance of list
|
| 1976 |
+
df = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"], "col3": [1, 3, 9]})
|
| 1977 |
+
|
| 1978 |
+
result = df.pivot_table("col1", index=["col3", "col2"], aggfunc="sum")
|
| 1979 |
+
m = MultiIndex.from_arrays([[1, 3, 9], ["C", "D", "E"]], names=["col3", "col2"])
|
| 1980 |
+
expected = DataFrame([3, 4, 5], index=m, columns=["col1"])
|
| 1981 |
+
|
| 1982 |
+
tm.assert_frame_equal(result, expected)
|
| 1983 |
+
|
| 1984 |
+
result = df.pivot_table("col1", index="col3", columns="col2", aggfunc="sum")
|
| 1985 |
+
expected = DataFrame(
|
| 1986 |
+
[[3, np.nan, np.nan], [np.nan, 4, np.nan], [np.nan, np.nan, 5]],
|
| 1987 |
+
index=Index([1, 3, 9], name="col3"),
|
| 1988 |
+
columns=Index(["C", "D", "E"], name="col2"),
|
| 1989 |
+
)
|
| 1990 |
+
|
| 1991 |
+
tm.assert_frame_equal(result, expected)
|
| 1992 |
+
|
| 1993 |
+
result = df.pivot_table("col1", index="col3", aggfunc=["sum"])
|
| 1994 |
+
m = MultiIndex.from_arrays([["sum"], ["col1"]])
|
| 1995 |
+
expected = DataFrame([3, 4, 5], index=Index([1, 3, 9], name="col3"), columns=m)
|
| 1996 |
+
|
| 1997 |
+
tm.assert_frame_equal(result, expected)
|
| 1998 |
+
|
| 1999 |
+
def test_pivot_margins_name_unicode(self):
|
| 2000 |
+
# issue #13292
|
| 2001 |
+
greek = "\u0394\u03bf\u03ba\u03b9\u03bc\u03ae"
|
| 2002 |
+
frame = DataFrame({"foo": [1, 2, 3]}, columns=Index(["foo"], dtype=object))
|
| 2003 |
+
table = pivot_table(
|
| 2004 |
+
frame, index=["foo"], aggfunc=len, margins=True, margins_name=greek
|
| 2005 |
+
)
|
| 2006 |
+
index = Index([1, 2, 3, greek], dtype="object", name="foo")
|
| 2007 |
+
expected = DataFrame(index=index, columns=[])
|
| 2008 |
+
tm.assert_frame_equal(table, expected)
|
| 2009 |
+
|
| 2010 |
+
def test_pivot_string_as_func(self):
|
| 2011 |
+
# GH #18713
|
| 2012 |
+
# for correctness purposes
|
| 2013 |
+
data = DataFrame(
|
| 2014 |
+
{
|
| 2015 |
+
"A": [
|
| 2016 |
+
"foo",
|
| 2017 |
+
"foo",
|
| 2018 |
+
"foo",
|
| 2019 |
+
"foo",
|
| 2020 |
+
"bar",
|
| 2021 |
+
"bar",
|
| 2022 |
+
"bar",
|
| 2023 |
+
"bar",
|
| 2024 |
+
"foo",
|
| 2025 |
+
"foo",
|
| 2026 |
+
"foo",
|
| 2027 |
+
],
|
| 2028 |
+
"B": [
|
| 2029 |
+
"one",
|
| 2030 |
+
"one",
|
| 2031 |
+
"one",
|
| 2032 |
+
"two",
|
| 2033 |
+
"one",
|
| 2034 |
+
"one",
|
| 2035 |
+
"one",
|
| 2036 |
+
"two",
|
| 2037 |
+
"two",
|
| 2038 |
+
"two",
|
| 2039 |
+
"one",
|
| 2040 |
+
],
|
| 2041 |
+
"C": range(11),
|
| 2042 |
+
}
|
| 2043 |
+
)
|
| 2044 |
+
|
| 2045 |
+
result = pivot_table(data, index="A", columns="B", aggfunc="sum")
|
| 2046 |
+
mi = MultiIndex(
|
| 2047 |
+
levels=[["C"], ["one", "two"]], codes=[[0, 0], [0, 1]], names=[None, "B"]
|
| 2048 |
+
)
|
| 2049 |
+
expected = DataFrame(
|
| 2050 |
+
{("C", "one"): {"bar": 15, "foo": 13}, ("C", "two"): {"bar": 7, "foo": 20}},
|
| 2051 |
+
columns=mi,
|
| 2052 |
+
).rename_axis("A")
|
| 2053 |
+
tm.assert_frame_equal(result, expected)
|
| 2054 |
+
|
| 2055 |
+
result = pivot_table(data, index="A", columns="B", aggfunc=["sum", "mean"])
|
| 2056 |
+
mi = MultiIndex(
|
| 2057 |
+
levels=[["sum", "mean"], ["C"], ["one", "two"]],
|
| 2058 |
+
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
|
| 2059 |
+
names=[None, None, "B"],
|
| 2060 |
+
)
|
| 2061 |
+
expected = DataFrame(
|
| 2062 |
+
{
|
| 2063 |
+
("mean", "C", "one"): {"bar": 5.0, "foo": 3.25},
|
| 2064 |
+
("mean", "C", "two"): {"bar": 7.0, "foo": 6.666666666666667},
|
| 2065 |
+
("sum", "C", "one"): {"bar": 15, "foo": 13},
|
| 2066 |
+
("sum", "C", "two"): {"bar": 7, "foo": 20},
|
| 2067 |
+
},
|
| 2068 |
+
columns=mi,
|
| 2069 |
+
).rename_axis("A")
|
| 2070 |
+
tm.assert_frame_equal(result, expected)
|
| 2071 |
+
|
| 2072 |
+
@pytest.mark.parametrize(
|
| 2073 |
+
"f, f_numpy",
|
| 2074 |
+
[
|
| 2075 |
+
("sum", np.sum),
|
| 2076 |
+
("mean", np.mean),
|
| 2077 |
+
("std", np.std),
|
| 2078 |
+
(["sum", "mean"], [np.sum, np.mean]),
|
| 2079 |
+
(["sum", "std"], [np.sum, np.std]),
|
| 2080 |
+
(["std", "mean"], [np.std, np.mean]),
|
| 2081 |
+
],
|
| 2082 |
+
)
|
| 2083 |
+
def test_pivot_string_func_vs_func(self, f, f_numpy, data):
|
| 2084 |
+
# GH #18713
|
| 2085 |
+
# for consistency purposes
|
| 2086 |
+
data = data.drop(columns="C")
|
| 2087 |
+
result = pivot_table(data, index="A", columns="B", aggfunc=f)
|
| 2088 |
+
ops = "|".join(f) if isinstance(f, list) else f
|
| 2089 |
+
msg = f"using DataFrameGroupBy.[{ops}]"
|
| 2090 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 2091 |
+
expected = pivot_table(data, index="A", columns="B", aggfunc=f_numpy)
|
| 2092 |
+
tm.assert_frame_equal(result, expected)
|
| 2093 |
+
|
| 2094 |
+
@pytest.mark.slow
|
| 2095 |
+
def test_pivot_number_of_levels_larger_than_int32(self, monkeypatch):
|
| 2096 |
+
# GH 20601
|
| 2097 |
+
# GH 26314: Change ValueError to PerformanceWarning
|
| 2098 |
+
class MockUnstacker(reshape_lib._Unstacker):
|
| 2099 |
+
def __init__(self, *args, **kwargs) -> None:
|
| 2100 |
+
# __init__ will raise the warning
|
| 2101 |
+
super().__init__(*args, **kwargs)
|
| 2102 |
+
raise Exception("Don't compute final result.")
|
| 2103 |
+
|
| 2104 |
+
with monkeypatch.context() as m:
|
| 2105 |
+
m.setattr(reshape_lib, "_Unstacker", MockUnstacker)
|
| 2106 |
+
df = DataFrame(
|
| 2107 |
+
{"ind1": np.arange(2**16), "ind2": np.arange(2**16), "count": 0}
|
| 2108 |
+
)
|
| 2109 |
+
|
| 2110 |
+
msg = "The following operation may generate"
|
| 2111 |
+
with tm.assert_produces_warning(PerformanceWarning, match=msg):
|
| 2112 |
+
with pytest.raises(Exception, match="Don't compute final result."):
|
| 2113 |
+
df.pivot_table(
|
| 2114 |
+
index="ind1", columns="ind2", values="count", aggfunc="count"
|
| 2115 |
+
)
|
| 2116 |
+
|
| 2117 |
+
def test_pivot_table_aggfunc_dropna(self, dropna):
|
| 2118 |
+
# GH 22159
|
| 2119 |
+
df = DataFrame(
|
| 2120 |
+
{
|
| 2121 |
+
"fruit": ["apple", "peach", "apple"],
|
| 2122 |
+
"size": [1, 1, 2],
|
| 2123 |
+
"taste": [7, 6, 6],
|
| 2124 |
+
}
|
| 2125 |
+
)
|
| 2126 |
+
|
| 2127 |
+
def ret_one(x):
|
| 2128 |
+
return 1
|
| 2129 |
+
|
| 2130 |
+
def ret_sum(x):
|
| 2131 |
+
return sum(x)
|
| 2132 |
+
|
| 2133 |
+
def ret_none(x):
|
| 2134 |
+
return np.nan
|
| 2135 |
+
|
| 2136 |
+
result = pivot_table(
|
| 2137 |
+
df, columns="fruit", aggfunc=[ret_sum, ret_none, ret_one], dropna=dropna
|
| 2138 |
+
)
|
| 2139 |
+
|
| 2140 |
+
data = [[3, 1, np.nan, np.nan, 1, 1], [13, 6, np.nan, np.nan, 1, 1]]
|
| 2141 |
+
col = MultiIndex.from_product(
|
| 2142 |
+
[["ret_sum", "ret_none", "ret_one"], ["apple", "peach"]],
|
| 2143 |
+
names=[None, "fruit"],
|
| 2144 |
+
)
|
| 2145 |
+
expected = DataFrame(data, index=["size", "taste"], columns=col)
|
| 2146 |
+
|
| 2147 |
+
if dropna:
|
| 2148 |
+
expected = expected.dropna(axis="columns")
|
| 2149 |
+
|
| 2150 |
+
tm.assert_frame_equal(result, expected)
|
| 2151 |
+
|
| 2152 |
+
def test_pivot_table_aggfunc_scalar_dropna(self, dropna):
|
| 2153 |
+
# GH 22159
|
| 2154 |
+
df = DataFrame(
|
| 2155 |
+
{"A": ["one", "two", "one"], "x": [3, np.nan, 2], "y": [1, np.nan, np.nan]}
|
| 2156 |
+
)
|
| 2157 |
+
|
| 2158 |
+
result = pivot_table(df, columns="A", aggfunc="mean", dropna=dropna)
|
| 2159 |
+
|
| 2160 |
+
data = [[2.5, np.nan], [1, np.nan]]
|
| 2161 |
+
col = Index(["one", "two"], name="A")
|
| 2162 |
+
expected = DataFrame(data, index=["x", "y"], columns=col)
|
| 2163 |
+
|
| 2164 |
+
if dropna:
|
| 2165 |
+
expected = expected.dropna(axis="columns")
|
| 2166 |
+
|
| 2167 |
+
tm.assert_frame_equal(result, expected)
|
| 2168 |
+
|
| 2169 |
+
@pytest.mark.parametrize("margins", [True, False])
|
| 2170 |
+
def test_pivot_table_empty_aggfunc(self, margins):
|
| 2171 |
+
# GH 9186 & GH 13483 & GH 49240
|
| 2172 |
+
df = DataFrame(
|
| 2173 |
+
{
|
| 2174 |
+
"A": [2, 2, 3, 3, 2],
|
| 2175 |
+
"id": [5, 6, 7, 8, 9],
|
| 2176 |
+
"C": ["p", "q", "q", "p", "q"],
|
| 2177 |
+
"D": [None, None, None, None, None],
|
| 2178 |
+
}
|
| 2179 |
+
)
|
| 2180 |
+
result = df.pivot_table(
|
| 2181 |
+
index="A", columns="D", values="id", aggfunc=np.size, margins=margins
|
| 2182 |
+
)
|
| 2183 |
+
exp_cols = Index([], name="D")
|
| 2184 |
+
expected = DataFrame(index=Index([], dtype="int64", name="A"), columns=exp_cols)
|
| 2185 |
+
tm.assert_frame_equal(result, expected)
|
| 2186 |
+
|
| 2187 |
+
def test_pivot_table_no_column_raises(self):
|
| 2188 |
+
# GH 10326
|
| 2189 |
+
def agg(arr):
|
| 2190 |
+
return np.mean(arr)
|
| 2191 |
+
|
| 2192 |
+
df = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]})
|
| 2193 |
+
with pytest.raises(KeyError, match="notpresent"):
|
| 2194 |
+
df.pivot_table("notpresent", "X", "Y", aggfunc=agg)
|
| 2195 |
+
|
| 2196 |
+
def test_pivot_table_multiindex_columns_doctest_case(self):
|
| 2197 |
+
# The relevant characteristic is that the call
|
| 2198 |
+
# to maybe_downcast_to_dtype(agged[v], data[v].dtype) in
|
| 2199 |
+
# __internal_pivot_table has `agged[v]` a DataFrame instead of Series,
|
| 2200 |
+
# In this case this is because agged.columns is a MultiIndex and 'v'
|
| 2201 |
+
# is only indexing on its first level.
|
| 2202 |
+
df = DataFrame(
|
| 2203 |
+
{
|
| 2204 |
+
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
|
| 2205 |
+
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
|
| 2206 |
+
"C": [
|
| 2207 |
+
"small",
|
| 2208 |
+
"large",
|
| 2209 |
+
"large",
|
| 2210 |
+
"small",
|
| 2211 |
+
"small",
|
| 2212 |
+
"large",
|
| 2213 |
+
"small",
|
| 2214 |
+
"small",
|
| 2215 |
+
"large",
|
| 2216 |
+
],
|
| 2217 |
+
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
|
| 2218 |
+
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
|
| 2219 |
+
}
|
| 2220 |
+
)
|
| 2221 |
+
|
| 2222 |
+
table = pivot_table(
|
| 2223 |
+
df,
|
| 2224 |
+
values=["D", "E"],
|
| 2225 |
+
index=["A", "C"],
|
| 2226 |
+
aggfunc={"D": "mean", "E": ["min", "max", "mean"]},
|
| 2227 |
+
)
|
| 2228 |
+
cols = MultiIndex.from_tuples(
|
| 2229 |
+
[("D", "mean"), ("E", "max"), ("E", "mean"), ("E", "min")]
|
| 2230 |
+
)
|
| 2231 |
+
index = MultiIndex.from_tuples(
|
| 2232 |
+
[("bar", "large"), ("bar", "small"), ("foo", "large"), ("foo", "small")],
|
| 2233 |
+
names=["A", "C"],
|
| 2234 |
+
)
|
| 2235 |
+
vals = np.array(
|
| 2236 |
+
[
|
| 2237 |
+
[5.5, 9.0, 7.5, 6.0],
|
| 2238 |
+
[5.5, 9.0, 8.5, 8.0],
|
| 2239 |
+
[2.0, 5.0, 4.5, 4.0],
|
| 2240 |
+
[2.33333333, 6.0, 4.33333333, 2.0],
|
| 2241 |
+
]
|
| 2242 |
+
)
|
| 2243 |
+
expected = DataFrame(vals, columns=cols, index=index)
|
| 2244 |
+
expected[("E", "min")] = expected[("E", "min")].astype(np.int64)
|
| 2245 |
+
expected[("E", "max")] = expected[("E", "max")].astype(np.int64)
|
| 2246 |
+
tm.assert_frame_equal(table, expected)
|
| 2247 |
+
|
| 2248 |
+
def test_pivot_table_sort_false(self):
|
| 2249 |
+
# GH#39143
|
| 2250 |
+
df = DataFrame(
|
| 2251 |
+
{
|
| 2252 |
+
"a": ["d1", "d4", "d3"],
|
| 2253 |
+
"col": ["a", "b", "c"],
|
| 2254 |
+
"num": [23, 21, 34],
|
| 2255 |
+
"year": ["2018", "2018", "2019"],
|
| 2256 |
+
}
|
| 2257 |
+
)
|
| 2258 |
+
result = df.pivot_table(
|
| 2259 |
+
index=["a", "col"], columns="year", values="num", aggfunc="sum", sort=False
|
| 2260 |
+
)
|
| 2261 |
+
expected = DataFrame(
|
| 2262 |
+
[[23, np.nan], [21, np.nan], [np.nan, 34]],
|
| 2263 |
+
columns=Index(["2018", "2019"], name="year"),
|
| 2264 |
+
index=MultiIndex.from_arrays(
|
| 2265 |
+
[["d1", "d4", "d3"], ["a", "b", "c"]], names=["a", "col"]
|
| 2266 |
+
),
|
| 2267 |
+
)
|
| 2268 |
+
tm.assert_frame_equal(result, expected)
|
| 2269 |
+
|
| 2270 |
+
def test_pivot_table_nullable_margins(self):
|
| 2271 |
+
# GH#48681
|
| 2272 |
+
df = DataFrame(
|
| 2273 |
+
{"a": "A", "b": [1, 2], "sales": Series([10, 11], dtype="Int64")}
|
| 2274 |
+
)
|
| 2275 |
+
|
| 2276 |
+
result = df.pivot_table(index="b", columns="a", margins=True, aggfunc="sum")
|
| 2277 |
+
expected = DataFrame(
|
| 2278 |
+
[[10, 10], [11, 11], [21, 21]],
|
| 2279 |
+
index=Index([1, 2, "All"], name="b"),
|
| 2280 |
+
columns=MultiIndex.from_tuples(
|
| 2281 |
+
[("sales", "A"), ("sales", "All")], names=[None, "a"]
|
| 2282 |
+
),
|
| 2283 |
+
dtype="Int64",
|
| 2284 |
+
)
|
| 2285 |
+
tm.assert_frame_equal(result, expected)
|
| 2286 |
+
|
| 2287 |
+
def test_pivot_table_sort_false_with_multiple_values(self):
|
| 2288 |
+
df = DataFrame(
|
| 2289 |
+
{
|
| 2290 |
+
"firstname": ["John", "Michael"],
|
| 2291 |
+
"lastname": ["Foo", "Bar"],
|
| 2292 |
+
"height": [173, 182],
|
| 2293 |
+
"age": [47, 33],
|
| 2294 |
+
}
|
| 2295 |
+
)
|
| 2296 |
+
result = df.pivot_table(
|
| 2297 |
+
index=["lastname", "firstname"], values=["height", "age"], sort=False
|
| 2298 |
+
)
|
| 2299 |
+
expected = DataFrame(
|
| 2300 |
+
[[173.0, 47.0], [182.0, 33.0]],
|
| 2301 |
+
columns=["height", "age"],
|
| 2302 |
+
index=MultiIndex.from_tuples(
|
| 2303 |
+
[("Foo", "John"), ("Bar", "Michael")],
|
| 2304 |
+
names=["lastname", "firstname"],
|
| 2305 |
+
),
|
| 2306 |
+
)
|
| 2307 |
+
tm.assert_frame_equal(result, expected)
|
| 2308 |
+
|
| 2309 |
+
def test_pivot_table_with_margins_and_numeric_columns(self):
|
| 2310 |
+
# GH 26568
|
| 2311 |
+
df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]])
|
| 2312 |
+
df.columns = [10, 20, 30]
|
| 2313 |
+
|
| 2314 |
+
result = df.pivot_table(
|
| 2315 |
+
index=10, columns=20, values=30, aggfunc="sum", fill_value=0, margins=True
|
| 2316 |
+
)
|
| 2317 |
+
|
| 2318 |
+
expected = DataFrame([[1, 2, 0, 3], [0, 3, 4, 7], [1, 5, 4, 10]])
|
| 2319 |
+
expected.columns = ["x", "y", "z", "All"]
|
| 2320 |
+
expected.index = ["a", "b", "All"]
|
| 2321 |
+
expected.columns.name = 20
|
| 2322 |
+
expected.index.name = 10
|
| 2323 |
+
|
| 2324 |
+
tm.assert_frame_equal(result, expected)
|
| 2325 |
+
|
| 2326 |
+
@pytest.mark.parametrize("dropna", [True, False])
|
| 2327 |
+
def test_pivot_ea_dtype_dropna(self, dropna):
|
| 2328 |
+
# GH#47477
|
| 2329 |
+
df = DataFrame({"x": "a", "y": "b", "age": Series([20, 40], dtype="Int64")})
|
| 2330 |
+
result = df.pivot_table(
|
| 2331 |
+
index="x", columns="y", values="age", aggfunc="mean", dropna=dropna
|
| 2332 |
+
)
|
| 2333 |
+
expected = DataFrame(
|
| 2334 |
+
[[30]],
|
| 2335 |
+
index=Index(["a"], name="x"),
|
| 2336 |
+
columns=Index(["b"], name="y"),
|
| 2337 |
+
dtype="Float64",
|
| 2338 |
+
)
|
| 2339 |
+
tm.assert_frame_equal(result, expected)
|
| 2340 |
+
|
| 2341 |
+
def test_pivot_table_datetime_warning(self):
|
| 2342 |
+
# GH#48683
|
| 2343 |
+
df = DataFrame(
|
| 2344 |
+
{
|
| 2345 |
+
"a": "A",
|
| 2346 |
+
"b": [1, 2],
|
| 2347 |
+
"date": pd.Timestamp("2019-12-31"),
|
| 2348 |
+
"sales": [10.0, 11],
|
| 2349 |
+
}
|
| 2350 |
+
)
|
| 2351 |
+
with tm.assert_produces_warning(None):
|
| 2352 |
+
result = df.pivot_table(
|
| 2353 |
+
index=["b", "date"], columns="a", margins=True, aggfunc="sum"
|
| 2354 |
+
)
|
| 2355 |
+
expected = DataFrame(
|
| 2356 |
+
[[10.0, 10.0], [11.0, 11.0], [21.0, 21.0]],
|
| 2357 |
+
index=MultiIndex.from_arrays(
|
| 2358 |
+
[
|
| 2359 |
+
Index([1, 2, "All"], name="b"),
|
| 2360 |
+
Index(
|
| 2361 |
+
[pd.Timestamp("2019-12-31"), pd.Timestamp("2019-12-31"), ""],
|
| 2362 |
+
dtype=object,
|
| 2363 |
+
name="date",
|
| 2364 |
+
),
|
| 2365 |
+
]
|
| 2366 |
+
),
|
| 2367 |
+
columns=MultiIndex.from_tuples(
|
| 2368 |
+
[("sales", "A"), ("sales", "All")], names=[None, "a"]
|
| 2369 |
+
),
|
| 2370 |
+
)
|
| 2371 |
+
tm.assert_frame_equal(result, expected)
|
| 2372 |
+
|
| 2373 |
+
def test_pivot_table_with_mixed_nested_tuples(self, using_array_manager):
|
| 2374 |
+
# GH 50342
|
| 2375 |
+
df = DataFrame(
|
| 2376 |
+
{
|
| 2377 |
+
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
|
| 2378 |
+
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
|
| 2379 |
+
"C": [
|
| 2380 |
+
"small",
|
| 2381 |
+
"large",
|
| 2382 |
+
"large",
|
| 2383 |
+
"small",
|
| 2384 |
+
"small",
|
| 2385 |
+
"large",
|
| 2386 |
+
"small",
|
| 2387 |
+
"small",
|
| 2388 |
+
"large",
|
| 2389 |
+
],
|
| 2390 |
+
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
|
| 2391 |
+
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
|
| 2392 |
+
("col5",): [
|
| 2393 |
+
"foo",
|
| 2394 |
+
"foo",
|
| 2395 |
+
"foo",
|
| 2396 |
+
"foo",
|
| 2397 |
+
"foo",
|
| 2398 |
+
"bar",
|
| 2399 |
+
"bar",
|
| 2400 |
+
"bar",
|
| 2401 |
+
"bar",
|
| 2402 |
+
],
|
| 2403 |
+
("col6", 6): [
|
| 2404 |
+
"one",
|
| 2405 |
+
"one",
|
| 2406 |
+
"one",
|
| 2407 |
+
"two",
|
| 2408 |
+
"two",
|
| 2409 |
+
"one",
|
| 2410 |
+
"one",
|
| 2411 |
+
"two",
|
| 2412 |
+
"two",
|
| 2413 |
+
],
|
| 2414 |
+
(7, "seven"): [
|
| 2415 |
+
"small",
|
| 2416 |
+
"large",
|
| 2417 |
+
"large",
|
| 2418 |
+
"small",
|
| 2419 |
+
"small",
|
| 2420 |
+
"large",
|
| 2421 |
+
"small",
|
| 2422 |
+
"small",
|
| 2423 |
+
"large",
|
| 2424 |
+
],
|
| 2425 |
+
}
|
| 2426 |
+
)
|
| 2427 |
+
result = pivot_table(
|
| 2428 |
+
df, values="D", index=["A", "B"], columns=[(7, "seven")], aggfunc="sum"
|
| 2429 |
+
)
|
| 2430 |
+
expected = DataFrame(
|
| 2431 |
+
[[4.0, 5.0], [7.0, 6.0], [4.0, 1.0], [np.nan, 6.0]],
|
| 2432 |
+
columns=Index(["large", "small"], name=(7, "seven")),
|
| 2433 |
+
index=MultiIndex.from_arrays(
|
| 2434 |
+
[["bar", "bar", "foo", "foo"], ["one", "two"] * 2], names=["A", "B"]
|
| 2435 |
+
),
|
| 2436 |
+
)
|
| 2437 |
+
if using_array_manager:
|
| 2438 |
+
# INFO(ArrayManager) column without NaNs can preserve int dtype
|
| 2439 |
+
expected["small"] = expected["small"].astype("int64")
|
| 2440 |
+
tm.assert_frame_equal(result, expected)
|
| 2441 |
+
|
| 2442 |
+
def test_pivot_table_aggfunc_nunique_with_different_values(self):
|
| 2443 |
+
test = DataFrame(
|
| 2444 |
+
{
|
| 2445 |
+
"a": range(10),
|
| 2446 |
+
"b": range(10),
|
| 2447 |
+
"c": range(10),
|
| 2448 |
+
"d": range(10),
|
| 2449 |
+
}
|
| 2450 |
+
)
|
| 2451 |
+
|
| 2452 |
+
columnval = MultiIndex.from_arrays(
|
| 2453 |
+
[
|
| 2454 |
+
["nunique" for i in range(10)],
|
| 2455 |
+
["c" for i in range(10)],
|
| 2456 |
+
range(10),
|
| 2457 |
+
],
|
| 2458 |
+
names=(None, None, "b"),
|
| 2459 |
+
)
|
| 2460 |
+
nparr = np.full((10, 10), np.nan)
|
| 2461 |
+
np.fill_diagonal(nparr, 1.0)
|
| 2462 |
+
|
| 2463 |
+
expected = DataFrame(nparr, index=Index(range(10), name="a"), columns=columnval)
|
| 2464 |
+
result = test.pivot_table(
|
| 2465 |
+
index=[
|
| 2466 |
+
"a",
|
| 2467 |
+
],
|
| 2468 |
+
columns=[
|
| 2469 |
+
"b",
|
| 2470 |
+
],
|
| 2471 |
+
values=[
|
| 2472 |
+
"c",
|
| 2473 |
+
],
|
| 2474 |
+
aggfunc=["nunique"],
|
| 2475 |
+
)
|
| 2476 |
+
|
| 2477 |
+
tm.assert_frame_equal(result, expected)
|
| 2478 |
+
|
| 2479 |
+
|
| 2480 |
+
class TestPivot:
|
| 2481 |
+
def test_pivot(self):
|
| 2482 |
+
data = {
|
| 2483 |
+
"index": ["A", "B", "C", "C", "B", "A"],
|
| 2484 |
+
"columns": ["One", "One", "One", "Two", "Two", "Two"],
|
| 2485 |
+
"values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
|
| 2486 |
+
}
|
| 2487 |
+
|
| 2488 |
+
frame = DataFrame(data)
|
| 2489 |
+
pivoted = frame.pivot(index="index", columns="columns", values="values")
|
| 2490 |
+
|
| 2491 |
+
expected = DataFrame(
|
| 2492 |
+
{
|
| 2493 |
+
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
|
| 2494 |
+
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
|
| 2495 |
+
}
|
| 2496 |
+
)
|
| 2497 |
+
|
| 2498 |
+
expected.index.name, expected.columns.name = "index", "columns"
|
| 2499 |
+
tm.assert_frame_equal(pivoted, expected)
|
| 2500 |
+
|
| 2501 |
+
# name tracking
|
| 2502 |
+
assert pivoted.index.name == "index"
|
| 2503 |
+
assert pivoted.columns.name == "columns"
|
| 2504 |
+
|
| 2505 |
+
# don't specify values
|
| 2506 |
+
pivoted = frame.pivot(index="index", columns="columns")
|
| 2507 |
+
assert pivoted.index.name == "index"
|
| 2508 |
+
assert pivoted.columns.names == (None, "columns")
|
| 2509 |
+
|
| 2510 |
+
def test_pivot_duplicates(self):
|
| 2511 |
+
data = DataFrame(
|
| 2512 |
+
{
|
| 2513 |
+
"a": ["bar", "bar", "foo", "foo", "foo"],
|
| 2514 |
+
"b": ["one", "two", "one", "one", "two"],
|
| 2515 |
+
"c": [1.0, 2.0, 3.0, 3.0, 4.0],
|
| 2516 |
+
}
|
| 2517 |
+
)
|
| 2518 |
+
with pytest.raises(ValueError, match="duplicate entries"):
|
| 2519 |
+
data.pivot(index="a", columns="b", values="c")
|
| 2520 |
+
|
| 2521 |
+
def test_pivot_empty(self):
|
| 2522 |
+
df = DataFrame(columns=["a", "b", "c"])
|
| 2523 |
+
result = df.pivot(index="a", columns="b", values="c")
|
| 2524 |
+
expected = DataFrame(index=[], columns=[])
|
| 2525 |
+
tm.assert_frame_equal(result, expected, check_names=False)
|
| 2526 |
+
|
| 2527 |
+
@pytest.mark.parametrize("dtype", [object, "string"])
|
| 2528 |
+
def test_pivot_integer_bug(self, dtype):
|
| 2529 |
+
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")], dtype=dtype)
|
| 2530 |
+
|
| 2531 |
+
result = df.pivot(index=1, columns=0, values=2)
|
| 2532 |
+
tm.assert_index_equal(result.columns, Index(["A", "B"], name=0, dtype=dtype))
|
| 2533 |
+
|
| 2534 |
+
def test_pivot_index_none(self):
|
| 2535 |
+
# GH#3962
|
| 2536 |
+
data = {
|
| 2537 |
+
"index": ["A", "B", "C", "C", "B", "A"],
|
| 2538 |
+
"columns": ["One", "One", "One", "Two", "Two", "Two"],
|
| 2539 |
+
"values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
|
| 2540 |
+
}
|
| 2541 |
+
|
| 2542 |
+
frame = DataFrame(data).set_index("index")
|
| 2543 |
+
result = frame.pivot(columns="columns", values="values")
|
| 2544 |
+
expected = DataFrame(
|
| 2545 |
+
{
|
| 2546 |
+
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
|
| 2547 |
+
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
|
| 2548 |
+
}
|
| 2549 |
+
)
|
| 2550 |
+
|
| 2551 |
+
expected.index.name, expected.columns.name = "index", "columns"
|
| 2552 |
+
tm.assert_frame_equal(result, expected)
|
| 2553 |
+
|
| 2554 |
+
# omit values
|
| 2555 |
+
result = frame.pivot(columns="columns")
|
| 2556 |
+
|
| 2557 |
+
expected.columns = MultiIndex.from_tuples(
|
| 2558 |
+
[("values", "One"), ("values", "Two")], names=[None, "columns"]
|
| 2559 |
+
)
|
| 2560 |
+
expected.index.name = "index"
|
| 2561 |
+
tm.assert_frame_equal(result, expected, check_names=False)
|
| 2562 |
+
assert result.index.name == "index"
|
| 2563 |
+
assert result.columns.names == (None, "columns")
|
| 2564 |
+
expected.columns = expected.columns.droplevel(0)
|
| 2565 |
+
result = frame.pivot(columns="columns", values="values")
|
| 2566 |
+
|
| 2567 |
+
expected.columns.name = "columns"
|
| 2568 |
+
tm.assert_frame_equal(result, expected)
|
| 2569 |
+
|
| 2570 |
+
def test_pivot_index_list_values_none_immutable_args(self):
|
| 2571 |
+
# GH37635
|
| 2572 |
+
df = DataFrame(
|
| 2573 |
+
{
|
| 2574 |
+
"lev1": [1, 1, 1, 2, 2, 2],
|
| 2575 |
+
"lev2": [1, 1, 2, 1, 1, 2],
|
| 2576 |
+
"lev3": [1, 2, 1, 2, 1, 2],
|
| 2577 |
+
"lev4": [1, 2, 3, 4, 5, 6],
|
| 2578 |
+
"values": [0, 1, 2, 3, 4, 5],
|
| 2579 |
+
}
|
| 2580 |
+
)
|
| 2581 |
+
index = ["lev1", "lev2"]
|
| 2582 |
+
columns = ["lev3"]
|
| 2583 |
+
result = df.pivot(index=index, columns=columns)
|
| 2584 |
+
|
| 2585 |
+
expected = DataFrame(
|
| 2586 |
+
np.array(
|
| 2587 |
+
[
|
| 2588 |
+
[1.0, 2.0, 0.0, 1.0],
|
| 2589 |
+
[3.0, np.nan, 2.0, np.nan],
|
| 2590 |
+
[5.0, 4.0, 4.0, 3.0],
|
| 2591 |
+
[np.nan, 6.0, np.nan, 5.0],
|
| 2592 |
+
]
|
| 2593 |
+
),
|
| 2594 |
+
index=MultiIndex.from_arrays(
|
| 2595 |
+
[(1, 1, 2, 2), (1, 2, 1, 2)], names=["lev1", "lev2"]
|
| 2596 |
+
),
|
| 2597 |
+
columns=MultiIndex.from_arrays(
|
| 2598 |
+
[("lev4", "lev4", "values", "values"), (1, 2, 1, 2)],
|
| 2599 |
+
names=[None, "lev3"],
|
| 2600 |
+
),
|
| 2601 |
+
)
|
| 2602 |
+
|
| 2603 |
+
tm.assert_frame_equal(result, expected)
|
| 2604 |
+
|
| 2605 |
+
assert index == ["lev1", "lev2"]
|
| 2606 |
+
assert columns == ["lev3"]
|
| 2607 |
+
|
| 2608 |
+
def test_pivot_columns_not_given(self):
|
| 2609 |
+
# GH#48293
|
| 2610 |
+
df = DataFrame({"a": [1], "b": 1})
|
| 2611 |
+
with pytest.raises(TypeError, match="missing 1 required keyword-only argument"):
|
| 2612 |
+
df.pivot() # pylint: disable=missing-kwoa
|
| 2613 |
+
|
| 2614 |
+
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="None is cast to NaN")
|
| 2615 |
+
def test_pivot_columns_is_none(self):
|
| 2616 |
+
# GH#48293
|
| 2617 |
+
df = DataFrame({None: [1], "b": 2, "c": 3})
|
| 2618 |
+
result = df.pivot(columns=None)
|
| 2619 |
+
expected = DataFrame({("b", 1): [2], ("c", 1): 3})
|
| 2620 |
+
tm.assert_frame_equal(result, expected)
|
| 2621 |
+
|
| 2622 |
+
result = df.pivot(columns=None, index="b")
|
| 2623 |
+
expected = DataFrame({("c", 1): 3}, index=Index([2], name="b"))
|
| 2624 |
+
tm.assert_frame_equal(result, expected)
|
| 2625 |
+
|
| 2626 |
+
result = df.pivot(columns=None, index="b", values="c")
|
| 2627 |
+
expected = DataFrame({1: 3}, index=Index([2], name="b"))
|
| 2628 |
+
tm.assert_frame_equal(result, expected)
|
| 2629 |
+
|
| 2630 |
+
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="None is cast to NaN")
|
| 2631 |
+
def test_pivot_index_is_none(self):
|
| 2632 |
+
# GH#48293
|
| 2633 |
+
df = DataFrame({None: [1], "b": 2, "c": 3})
|
| 2634 |
+
|
| 2635 |
+
result = df.pivot(columns="b", index=None)
|
| 2636 |
+
expected = DataFrame({("c", 2): 3}, index=[1])
|
| 2637 |
+
expected.columns.names = [None, "b"]
|
| 2638 |
+
tm.assert_frame_equal(result, expected)
|
| 2639 |
+
|
| 2640 |
+
result = df.pivot(columns="b", index=None, values="c")
|
| 2641 |
+
expected = DataFrame(3, index=[1], columns=Index([2], name="b"))
|
| 2642 |
+
tm.assert_frame_equal(result, expected)
|
| 2643 |
+
|
| 2644 |
+
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="None is cast to NaN")
|
| 2645 |
+
def test_pivot_values_is_none(self):
|
| 2646 |
+
# GH#48293
|
| 2647 |
+
df = DataFrame({None: [1], "b": 2, "c": 3})
|
| 2648 |
+
|
| 2649 |
+
result = df.pivot(columns="b", index="c", values=None)
|
| 2650 |
+
expected = DataFrame(
|
| 2651 |
+
1, index=Index([3], name="c"), columns=Index([2], name="b")
|
| 2652 |
+
)
|
| 2653 |
+
tm.assert_frame_equal(result, expected)
|
| 2654 |
+
|
| 2655 |
+
result = df.pivot(columns="b", values=None)
|
| 2656 |
+
expected = DataFrame(1, index=[0], columns=Index([2], name="b"))
|
| 2657 |
+
tm.assert_frame_equal(result, expected)
|
| 2658 |
+
|
| 2659 |
+
def test_pivot_not_changing_index_name(self):
|
| 2660 |
+
# GH#52692
|
| 2661 |
+
df = DataFrame({"one": ["a"], "two": 0, "three": 1})
|
| 2662 |
+
expected = df.copy(deep=True)
|
| 2663 |
+
df.pivot(index="one", columns="two", values="three")
|
| 2664 |
+
tm.assert_frame_equal(df, expected)
|
| 2665 |
+
|
| 2666 |
+
def test_pivot_table_empty_dataframe_correct_index(self):
|
| 2667 |
+
# GH 21932
|
| 2668 |
+
df = DataFrame([], columns=["a", "b", "value"])
|
| 2669 |
+
pivot = df.pivot_table(index="a", columns="b", values="value", aggfunc="count")
|
| 2670 |
+
|
| 2671 |
+
expected = Index([], dtype="object", name="b")
|
| 2672 |
+
tm.assert_index_equal(pivot.columns, expected)
|
| 2673 |
+
|
| 2674 |
+
def test_pivot_table_handles_explicit_datetime_types(self):
|
| 2675 |
+
# GH#43574
|
| 2676 |
+
df = DataFrame(
|
| 2677 |
+
[
|
| 2678 |
+
{"a": "x", "date_str": "2023-01-01", "amount": 1},
|
| 2679 |
+
{"a": "y", "date_str": "2023-01-02", "amount": 2},
|
| 2680 |
+
{"a": "z", "date_str": "2023-01-03", "amount": 3},
|
| 2681 |
+
]
|
| 2682 |
+
)
|
| 2683 |
+
df["date"] = pd.to_datetime(df["date_str"])
|
| 2684 |
+
|
| 2685 |
+
with tm.assert_produces_warning(False):
|
| 2686 |
+
pivot = df.pivot_table(
|
| 2687 |
+
index=["a", "date"], values=["amount"], aggfunc="sum", margins=True
|
| 2688 |
+
)
|
| 2689 |
+
|
| 2690 |
+
expected = MultiIndex.from_tuples(
|
| 2691 |
+
[
|
| 2692 |
+
("x", datetime.strptime("2023-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")),
|
| 2693 |
+
("y", datetime.strptime("2023-01-02 00:00:00", "%Y-%m-%d %H:%M:%S")),
|
| 2694 |
+
("z", datetime.strptime("2023-01-03 00:00:00", "%Y-%m-%d %H:%M:%S")),
|
| 2695 |
+
("All", ""),
|
| 2696 |
+
],
|
| 2697 |
+
names=["a", "date"],
|
| 2698 |
+
)
|
| 2699 |
+
tm.assert_index_equal(pivot.index, expected)
|
| 2700 |
+
|
| 2701 |
+
def test_pivot_table_with_margins_and_numeric_column_names(self):
|
| 2702 |
+
# GH#26568
|
| 2703 |
+
df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]])
|
| 2704 |
+
|
| 2705 |
+
result = df.pivot_table(
|
| 2706 |
+
index=0, columns=1, values=2, aggfunc="sum", fill_value=0, margins=True
|
| 2707 |
+
)
|
| 2708 |
+
|
| 2709 |
+
expected = DataFrame(
|
| 2710 |
+
[[1, 2, 0, 3], [0, 3, 4, 7], [1, 5, 4, 10]],
|
| 2711 |
+
columns=Index(["x", "y", "z", "All"], name=1),
|
| 2712 |
+
index=Index(["a", "b", "All"], name=0),
|
| 2713 |
+
)
|
| 2714 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot_multilevel.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas._libs import lib
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from pandas import (
|
| 8 |
+
Index,
|
| 9 |
+
MultiIndex,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.mark.parametrize(
|
| 15 |
+
"input_index, input_columns, input_values, "
|
| 16 |
+
"expected_values, expected_columns, expected_index",
|
| 17 |
+
[
|
| 18 |
+
(
|
| 19 |
+
["lev4"],
|
| 20 |
+
"lev3",
|
| 21 |
+
"values",
|
| 22 |
+
[
|
| 23 |
+
[0.0, np.nan],
|
| 24 |
+
[np.nan, 1.0],
|
| 25 |
+
[2.0, np.nan],
|
| 26 |
+
[np.nan, 3.0],
|
| 27 |
+
[4.0, np.nan],
|
| 28 |
+
[np.nan, 5.0],
|
| 29 |
+
[6.0, np.nan],
|
| 30 |
+
[np.nan, 7.0],
|
| 31 |
+
],
|
| 32 |
+
Index([1, 2], name="lev3"),
|
| 33 |
+
Index([1, 2, 3, 4, 5, 6, 7, 8], name="lev4"),
|
| 34 |
+
),
|
| 35 |
+
(
|
| 36 |
+
["lev4"],
|
| 37 |
+
"lev3",
|
| 38 |
+
lib.no_default,
|
| 39 |
+
[
|
| 40 |
+
[1.0, np.nan, 1.0, np.nan, 0.0, np.nan],
|
| 41 |
+
[np.nan, 1.0, np.nan, 1.0, np.nan, 1.0],
|
| 42 |
+
[1.0, np.nan, 2.0, np.nan, 2.0, np.nan],
|
| 43 |
+
[np.nan, 1.0, np.nan, 2.0, np.nan, 3.0],
|
| 44 |
+
[2.0, np.nan, 1.0, np.nan, 4.0, np.nan],
|
| 45 |
+
[np.nan, 2.0, np.nan, 1.0, np.nan, 5.0],
|
| 46 |
+
[2.0, np.nan, 2.0, np.nan, 6.0, np.nan],
|
| 47 |
+
[np.nan, 2.0, np.nan, 2.0, np.nan, 7.0],
|
| 48 |
+
],
|
| 49 |
+
MultiIndex.from_tuples(
|
| 50 |
+
[
|
| 51 |
+
("lev1", 1),
|
| 52 |
+
("lev1", 2),
|
| 53 |
+
("lev2", 1),
|
| 54 |
+
("lev2", 2),
|
| 55 |
+
("values", 1),
|
| 56 |
+
("values", 2),
|
| 57 |
+
],
|
| 58 |
+
names=[None, "lev3"],
|
| 59 |
+
),
|
| 60 |
+
Index([1, 2, 3, 4, 5, 6, 7, 8], name="lev4"),
|
| 61 |
+
),
|
| 62 |
+
(
|
| 63 |
+
["lev1", "lev2"],
|
| 64 |
+
"lev3",
|
| 65 |
+
"values",
|
| 66 |
+
[[0, 1], [2, 3], [4, 5], [6, 7]],
|
| 67 |
+
Index([1, 2], name="lev3"),
|
| 68 |
+
MultiIndex.from_tuples(
|
| 69 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
|
| 70 |
+
),
|
| 71 |
+
),
|
| 72 |
+
(
|
| 73 |
+
["lev1", "lev2"],
|
| 74 |
+
"lev3",
|
| 75 |
+
lib.no_default,
|
| 76 |
+
[[1, 2, 0, 1], [3, 4, 2, 3], [5, 6, 4, 5], [7, 8, 6, 7]],
|
| 77 |
+
MultiIndex.from_tuples(
|
| 78 |
+
[("lev4", 1), ("lev4", 2), ("values", 1), ("values", 2)],
|
| 79 |
+
names=[None, "lev3"],
|
| 80 |
+
),
|
| 81 |
+
MultiIndex.from_tuples(
|
| 82 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
|
| 83 |
+
),
|
| 84 |
+
),
|
| 85 |
+
],
|
| 86 |
+
)
|
| 87 |
+
def test_pivot_list_like_index(
|
| 88 |
+
input_index,
|
| 89 |
+
input_columns,
|
| 90 |
+
input_values,
|
| 91 |
+
expected_values,
|
| 92 |
+
expected_columns,
|
| 93 |
+
expected_index,
|
| 94 |
+
):
|
| 95 |
+
# GH 21425, test when index is given a list
|
| 96 |
+
df = pd.DataFrame(
|
| 97 |
+
{
|
| 98 |
+
"lev1": [1, 1, 1, 1, 2, 2, 2, 2],
|
| 99 |
+
"lev2": [1, 1, 2, 2, 1, 1, 2, 2],
|
| 100 |
+
"lev3": [1, 2, 1, 2, 1, 2, 1, 2],
|
| 101 |
+
"lev4": [1, 2, 3, 4, 5, 6, 7, 8],
|
| 102 |
+
"values": [0, 1, 2, 3, 4, 5, 6, 7],
|
| 103 |
+
}
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
result = df.pivot(index=input_index, columns=input_columns, values=input_values)
|
| 107 |
+
expected = pd.DataFrame(
|
| 108 |
+
expected_values, columns=expected_columns, index=expected_index
|
| 109 |
+
)
|
| 110 |
+
tm.assert_frame_equal(result, expected)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
@pytest.mark.parametrize(
|
| 114 |
+
"input_index, input_columns, input_values, "
|
| 115 |
+
"expected_values, expected_columns, expected_index",
|
| 116 |
+
[
|
| 117 |
+
(
|
| 118 |
+
"lev4",
|
| 119 |
+
["lev3"],
|
| 120 |
+
"values",
|
| 121 |
+
[
|
| 122 |
+
[0.0, np.nan],
|
| 123 |
+
[np.nan, 1.0],
|
| 124 |
+
[2.0, np.nan],
|
| 125 |
+
[np.nan, 3.0],
|
| 126 |
+
[4.0, np.nan],
|
| 127 |
+
[np.nan, 5.0],
|
| 128 |
+
[6.0, np.nan],
|
| 129 |
+
[np.nan, 7.0],
|
| 130 |
+
],
|
| 131 |
+
Index([1, 2], name="lev3"),
|
| 132 |
+
Index([1, 2, 3, 4, 5, 6, 7, 8], name="lev4"),
|
| 133 |
+
),
|
| 134 |
+
(
|
| 135 |
+
["lev1", "lev2"],
|
| 136 |
+
["lev3"],
|
| 137 |
+
"values",
|
| 138 |
+
[[0, 1], [2, 3], [4, 5], [6, 7]],
|
| 139 |
+
Index([1, 2], name="lev3"),
|
| 140 |
+
MultiIndex.from_tuples(
|
| 141 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
|
| 142 |
+
),
|
| 143 |
+
),
|
| 144 |
+
(
|
| 145 |
+
["lev1"],
|
| 146 |
+
["lev2", "lev3"],
|
| 147 |
+
"values",
|
| 148 |
+
[[0, 1, 2, 3], [4, 5, 6, 7]],
|
| 149 |
+
MultiIndex.from_tuples(
|
| 150 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev2", "lev3"]
|
| 151 |
+
),
|
| 152 |
+
Index([1, 2], name="lev1"),
|
| 153 |
+
),
|
| 154 |
+
(
|
| 155 |
+
["lev1", "lev2"],
|
| 156 |
+
["lev3", "lev4"],
|
| 157 |
+
"values",
|
| 158 |
+
[
|
| 159 |
+
[0.0, 1.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
|
| 160 |
+
[np.nan, np.nan, 2.0, 3.0, np.nan, np.nan, np.nan, np.nan],
|
| 161 |
+
[np.nan, np.nan, np.nan, np.nan, 4.0, 5.0, np.nan, np.nan],
|
| 162 |
+
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 6.0, 7.0],
|
| 163 |
+
],
|
| 164 |
+
MultiIndex.from_tuples(
|
| 165 |
+
[(1, 1), (2, 2), (1, 3), (2, 4), (1, 5), (2, 6), (1, 7), (2, 8)],
|
| 166 |
+
names=["lev3", "lev4"],
|
| 167 |
+
),
|
| 168 |
+
MultiIndex.from_tuples(
|
| 169 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
|
| 170 |
+
),
|
| 171 |
+
),
|
| 172 |
+
],
|
| 173 |
+
)
|
| 174 |
+
def test_pivot_list_like_columns(
|
| 175 |
+
input_index,
|
| 176 |
+
input_columns,
|
| 177 |
+
input_values,
|
| 178 |
+
expected_values,
|
| 179 |
+
expected_columns,
|
| 180 |
+
expected_index,
|
| 181 |
+
):
|
| 182 |
+
# GH 21425, test when columns is given a list
|
| 183 |
+
df = pd.DataFrame(
|
| 184 |
+
{
|
| 185 |
+
"lev1": [1, 1, 1, 1, 2, 2, 2, 2],
|
| 186 |
+
"lev2": [1, 1, 2, 2, 1, 1, 2, 2],
|
| 187 |
+
"lev3": [1, 2, 1, 2, 1, 2, 1, 2],
|
| 188 |
+
"lev4": [1, 2, 3, 4, 5, 6, 7, 8],
|
| 189 |
+
"values": [0, 1, 2, 3, 4, 5, 6, 7],
|
| 190 |
+
}
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
result = df.pivot(index=input_index, columns=input_columns, values=input_values)
|
| 194 |
+
expected = pd.DataFrame(
|
| 195 |
+
expected_values, columns=expected_columns, index=expected_index
|
| 196 |
+
)
|
| 197 |
+
tm.assert_frame_equal(result, expected)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def test_pivot_multiindexed_rows_and_cols(using_array_manager):
|
| 201 |
+
# GH 36360
|
| 202 |
+
|
| 203 |
+
df = pd.DataFrame(
|
| 204 |
+
data=np.arange(12).reshape(4, 3),
|
| 205 |
+
columns=MultiIndex.from_tuples(
|
| 206 |
+
[(0, 0), (0, 1), (0, 2)], names=["col_L0", "col_L1"]
|
| 207 |
+
),
|
| 208 |
+
index=MultiIndex.from_tuples(
|
| 209 |
+
[(0, 0, 0), (0, 0, 1), (1, 1, 1), (1, 0, 0)],
|
| 210 |
+
names=["idx_L0", "idx_L1", "idx_L2"],
|
| 211 |
+
),
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
res = df.pivot_table(
|
| 215 |
+
index=["idx_L0"],
|
| 216 |
+
columns=["idx_L1"],
|
| 217 |
+
values=[(0, 1)],
|
| 218 |
+
aggfunc=lambda col: col.values.sum(),
|
| 219 |
+
)
|
| 220 |
+
|
| 221 |
+
expected = pd.DataFrame(
|
| 222 |
+
data=[[5, np.nan], [10, 7.0]],
|
| 223 |
+
columns=MultiIndex.from_tuples(
|
| 224 |
+
[(0, 1, 0), (0, 1, 1)], names=["col_L0", "col_L1", "idx_L1"]
|
| 225 |
+
),
|
| 226 |
+
index=Index([0, 1], dtype="int64", name="idx_L0"),
|
| 227 |
+
)
|
| 228 |
+
if not using_array_manager:
|
| 229 |
+
# BlockManager does not preserve the dtypes
|
| 230 |
+
expected = expected.astype("float64")
|
| 231 |
+
|
| 232 |
+
tm.assert_frame_equal(res, expected)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def test_pivot_df_multiindex_index_none():
|
| 236 |
+
# GH 23955
|
| 237 |
+
df = pd.DataFrame(
|
| 238 |
+
[
|
| 239 |
+
["A", "A1", "label1", 1],
|
| 240 |
+
["A", "A2", "label2", 2],
|
| 241 |
+
["B", "A1", "label1", 3],
|
| 242 |
+
["B", "A2", "label2", 4],
|
| 243 |
+
],
|
| 244 |
+
columns=["index_1", "index_2", "label", "value"],
|
| 245 |
+
)
|
| 246 |
+
df = df.set_index(["index_1", "index_2"])
|
| 247 |
+
|
| 248 |
+
result = df.pivot(columns="label", values="value")
|
| 249 |
+
expected = pd.DataFrame(
|
| 250 |
+
[[1.0, np.nan], [np.nan, 2.0], [3.0, np.nan], [np.nan, 4.0]],
|
| 251 |
+
index=df.index,
|
| 252 |
+
columns=Index(["label1", "label2"], name="label"),
|
| 253 |
+
)
|
| 254 |
+
tm.assert_frame_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
Index,
|
| 6 |
+
date_range,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
from pandas.core.reshape.util import cartesian_product
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestCartesianProduct:
|
| 13 |
+
def test_simple(self):
|
| 14 |
+
x, y = list("ABC"), [1, 22]
|
| 15 |
+
result1, result2 = cartesian_product([x, y])
|
| 16 |
+
expected1 = np.array(["A", "A", "B", "B", "C", "C"])
|
| 17 |
+
expected2 = np.array([1, 22, 1, 22, 1, 22])
|
| 18 |
+
tm.assert_numpy_array_equal(result1, expected1)
|
| 19 |
+
tm.assert_numpy_array_equal(result2, expected2)
|
| 20 |
+
|
| 21 |
+
def test_datetimeindex(self):
|
| 22 |
+
# regression test for GitHub issue #6439
|
| 23 |
+
# make sure that the ordering on datetimeindex is consistent
|
| 24 |
+
x = date_range("2000-01-01", periods=2)
|
| 25 |
+
result1, result2 = (Index(y).day for y in cartesian_product([x, x]))
|
| 26 |
+
expected1 = Index([1, 1, 2, 2], dtype=np.int32)
|
| 27 |
+
expected2 = Index([1, 2, 1, 2], dtype=np.int32)
|
| 28 |
+
tm.assert_index_equal(result1, expected1)
|
| 29 |
+
tm.assert_index_equal(result2, expected2)
|
| 30 |
+
|
| 31 |
+
def test_tzaware_retained(self):
|
| 32 |
+
x = date_range("2000-01-01", periods=2, tz="US/Pacific")
|
| 33 |
+
y = np.array([3, 4])
|
| 34 |
+
result1, result2 = cartesian_product([x, y])
|
| 35 |
+
|
| 36 |
+
expected = x.repeat(2)
|
| 37 |
+
tm.assert_index_equal(result1, expected)
|
| 38 |
+
|
| 39 |
+
def test_tzaware_retained_categorical(self):
|
| 40 |
+
x = date_range("2000-01-01", periods=2, tz="US/Pacific").astype("category")
|
| 41 |
+
y = np.array([3, 4])
|
| 42 |
+
result1, result2 = cartesian_product([x, y])
|
| 43 |
+
|
| 44 |
+
expected = x.repeat(2)
|
| 45 |
+
tm.assert_index_equal(result1, expected)
|
| 46 |
+
|
| 47 |
+
@pytest.mark.parametrize("x, y", [[[], []], [[0, 1], []], [[], ["a", "b", "c"]]])
|
| 48 |
+
def test_empty(self, x, y):
|
| 49 |
+
# product of empty factors
|
| 50 |
+
expected1 = np.array([], dtype=np.asarray(x).dtype)
|
| 51 |
+
expected2 = np.array([], dtype=np.asarray(y).dtype)
|
| 52 |
+
result1, result2 = cartesian_product([x, y])
|
| 53 |
+
tm.assert_numpy_array_equal(result1, expected1)
|
| 54 |
+
tm.assert_numpy_array_equal(result2, expected2)
|
| 55 |
+
|
| 56 |
+
def test_empty_input(self):
|
| 57 |
+
# empty product (empty input):
|
| 58 |
+
result = cartesian_product([])
|
| 59 |
+
expected = []
|
| 60 |
+
assert result == expected
|
| 61 |
+
|
| 62 |
+
@pytest.mark.parametrize(
|
| 63 |
+
"X", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]]
|
| 64 |
+
)
|
| 65 |
+
def test_invalid_input(self, X):
|
| 66 |
+
msg = "Input must be a list-like of list-likes"
|
| 67 |
+
|
| 68 |
+
with pytest.raises(TypeError, match=msg):
|
| 69 |
+
cartesian_product(X=X)
|
| 70 |
+
|
| 71 |
+
def test_exceed_product_space(self):
|
| 72 |
+
# GH31355: raise useful error when produce space is too large
|
| 73 |
+
msg = "Product space too large to allocate arrays!"
|
| 74 |
+
|
| 75 |
+
with pytest.raises(ValueError, match=msg):
|
| 76 |
+
dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [
|
| 77 |
+
(np.arange(15128, dtype=np.int16)),
|
| 78 |
+
]
|
| 79 |
+
cartesian_product(X=dims)
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
|
| 5 |
+
object_pyarrow_numpy = ("object", "string[pyarrow_numpy]")
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _convert_na_value(ser, expected):
|
| 9 |
+
if ser.dtype != object:
|
| 10 |
+
if ser.dtype.storage == "pyarrow_numpy":
|
| 11 |
+
expected = expected.fillna(np.nan)
|
| 12 |
+
else:
|
| 13 |
+
# GH#18463
|
| 14 |
+
expected = expected.fillna(pd.NA)
|
| 15 |
+
return expected
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (524 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (3.01 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_api.cpython-310.pyc
ADDED
|
Binary file (4.64 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_case_justify.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_cat.cpython-310.pyc
ADDED
|
Binary file (9.56 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_extract.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_find_replace.cpython-310.pyc
ADDED
|
Binary file (23.4 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_get_dummies.cpython-310.pyc
ADDED
|
Binary file (1.74 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_split_partition.cpython-310.pyc
ADDED
|
Binary file (18.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_string_array.cpython-310.pyc
ADDED
|
Binary file (2.77 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_strings.cpython-310.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/conftest.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import Series
|
| 4 |
+
from pandas.core.strings.accessor import StringMethods
|
| 5 |
+
|
| 6 |
+
_any_string_method = [
|
| 7 |
+
("cat", (), {"sep": ","}),
|
| 8 |
+
("cat", (Series(list("zyx")),), {"sep": ",", "join": "left"}),
|
| 9 |
+
("center", (10,), {}),
|
| 10 |
+
("contains", ("a",), {}),
|
| 11 |
+
("count", ("a",), {}),
|
| 12 |
+
("decode", ("UTF-8",), {}),
|
| 13 |
+
("encode", ("UTF-8",), {}),
|
| 14 |
+
("endswith", ("a",), {}),
|
| 15 |
+
("endswith", ((),), {}),
|
| 16 |
+
("endswith", (("a",),), {}),
|
| 17 |
+
("endswith", (("a", "b"),), {}),
|
| 18 |
+
("endswith", (("a", "MISSING"),), {}),
|
| 19 |
+
("endswith", ("a",), {"na": True}),
|
| 20 |
+
("endswith", ("a",), {"na": False}),
|
| 21 |
+
("extract", ("([a-z]*)",), {"expand": False}),
|
| 22 |
+
("extract", ("([a-z]*)",), {"expand": True}),
|
| 23 |
+
("extractall", ("([a-z]*)",), {}),
|
| 24 |
+
("find", ("a",), {}),
|
| 25 |
+
("findall", ("a",), {}),
|
| 26 |
+
("get", (0,), {}),
|
| 27 |
+
# because "index" (and "rindex") fail intentionally
|
| 28 |
+
# if the string is not found, search only for empty string
|
| 29 |
+
("index", ("",), {}),
|
| 30 |
+
("join", (",",), {}),
|
| 31 |
+
("ljust", (10,), {}),
|
| 32 |
+
("match", ("a",), {}),
|
| 33 |
+
("fullmatch", ("a",), {}),
|
| 34 |
+
("normalize", ("NFC",), {}),
|
| 35 |
+
("pad", (10,), {}),
|
| 36 |
+
("partition", (" ",), {"expand": False}),
|
| 37 |
+
("partition", (" ",), {"expand": True}),
|
| 38 |
+
("repeat", (3,), {}),
|
| 39 |
+
("replace", ("a", "z"), {}),
|
| 40 |
+
("rfind", ("a",), {}),
|
| 41 |
+
("rindex", ("",), {}),
|
| 42 |
+
("rjust", (10,), {}),
|
| 43 |
+
("rpartition", (" ",), {"expand": False}),
|
| 44 |
+
("rpartition", (" ",), {"expand": True}),
|
| 45 |
+
("slice", (0, 1), {}),
|
| 46 |
+
("slice_replace", (0, 1, "z"), {}),
|
| 47 |
+
("split", (" ",), {"expand": False}),
|
| 48 |
+
("split", (" ",), {"expand": True}),
|
| 49 |
+
("startswith", ("a",), {}),
|
| 50 |
+
("startswith", (("a",),), {}),
|
| 51 |
+
("startswith", (("a", "b"),), {}),
|
| 52 |
+
("startswith", (("a", "MISSING"),), {}),
|
| 53 |
+
("startswith", ((),), {}),
|
| 54 |
+
("startswith", ("a",), {"na": True}),
|
| 55 |
+
("startswith", ("a",), {"na": False}),
|
| 56 |
+
("removeprefix", ("a",), {}),
|
| 57 |
+
("removesuffix", ("a",), {}),
|
| 58 |
+
# translating unicode points of "a" to "d"
|
| 59 |
+
("translate", ({97: 100},), {}),
|
| 60 |
+
("wrap", (2,), {}),
|
| 61 |
+
("zfill", (10,), {}),
|
| 62 |
+
] + list(
|
| 63 |
+
zip(
|
| 64 |
+
[
|
| 65 |
+
# methods without positional arguments: zip with empty tuple and empty dict
|
| 66 |
+
"capitalize",
|
| 67 |
+
"cat",
|
| 68 |
+
"get_dummies",
|
| 69 |
+
"isalnum",
|
| 70 |
+
"isalpha",
|
| 71 |
+
"isdecimal",
|
| 72 |
+
"isdigit",
|
| 73 |
+
"islower",
|
| 74 |
+
"isnumeric",
|
| 75 |
+
"isspace",
|
| 76 |
+
"istitle",
|
| 77 |
+
"isupper",
|
| 78 |
+
"len",
|
| 79 |
+
"lower",
|
| 80 |
+
"lstrip",
|
| 81 |
+
"partition",
|
| 82 |
+
"rpartition",
|
| 83 |
+
"rsplit",
|
| 84 |
+
"rstrip",
|
| 85 |
+
"slice",
|
| 86 |
+
"slice_replace",
|
| 87 |
+
"split",
|
| 88 |
+
"strip",
|
| 89 |
+
"swapcase",
|
| 90 |
+
"title",
|
| 91 |
+
"upper",
|
| 92 |
+
"casefold",
|
| 93 |
+
],
|
| 94 |
+
[()] * 100,
|
| 95 |
+
[{}] * 100,
|
| 96 |
+
)
|
| 97 |
+
)
|
| 98 |
+
ids, _, _ = zip(*_any_string_method) # use method name as fixture-id
|
| 99 |
+
missing_methods = {f for f in dir(StringMethods) if not f.startswith("_")} - set(ids)
|
| 100 |
+
|
| 101 |
+
# test that the above list captures all methods of StringMethods
|
| 102 |
+
assert not missing_methods
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@pytest.fixture(params=_any_string_method, ids=ids)
|
| 106 |
+
def any_string_method(request):
|
| 107 |
+
"""
|
| 108 |
+
Fixture for all public methods of `StringMethods`
|
| 109 |
+
|
| 110 |
+
This fixture returns a tuple of the method name and sample arguments
|
| 111 |
+
necessary to call the method.
|
| 112 |
+
|
| 113 |
+
Returns
|
| 114 |
+
-------
|
| 115 |
+
method_name : str
|
| 116 |
+
The name of the method in `StringMethods`
|
| 117 |
+
args : tuple
|
| 118 |
+
Sample values for the positional arguments
|
| 119 |
+
kwargs : dict
|
| 120 |
+
Sample values for the keyword arguments
|
| 121 |
+
|
| 122 |
+
Examples
|
| 123 |
+
--------
|
| 124 |
+
>>> def test_something(any_string_method):
|
| 125 |
+
... s = Series(['a', 'b', np.nan, 'd'])
|
| 126 |
+
...
|
| 127 |
+
... method_name, args, kwargs = any_string_method
|
| 128 |
+
... method = getattr(s.str, method_name)
|
| 129 |
+
... # will not raise
|
| 130 |
+
... method(*args, **kwargs)
|
| 131 |
+
"""
|
| 132 |
+
return request.param
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/test_api.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
CategoricalDtype,
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
MultiIndex,
|
| 9 |
+
Series,
|
| 10 |
+
_testing as tm,
|
| 11 |
+
option_context,
|
| 12 |
+
)
|
| 13 |
+
from pandas.core.strings.accessor import StringMethods
|
| 14 |
+
|
| 15 |
+
# subset of the full set from pandas/conftest.py
|
| 16 |
+
_any_allowed_skipna_inferred_dtype = [
|
| 17 |
+
("string", ["a", np.nan, "c"]),
|
| 18 |
+
("bytes", [b"a", np.nan, b"c"]),
|
| 19 |
+
("empty", [np.nan, np.nan, np.nan]),
|
| 20 |
+
("empty", []),
|
| 21 |
+
("mixed-integer", ["a", np.nan, 2]),
|
| 22 |
+
]
|
| 23 |
+
ids, _ = zip(*_any_allowed_skipna_inferred_dtype) # use inferred type as id
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@pytest.fixture(params=_any_allowed_skipna_inferred_dtype, ids=ids)
|
| 27 |
+
def any_allowed_skipna_inferred_dtype(request):
|
| 28 |
+
"""
|
| 29 |
+
Fixture for all (inferred) dtypes allowed in StringMethods.__init__
|
| 30 |
+
|
| 31 |
+
The covered (inferred) types are:
|
| 32 |
+
* 'string'
|
| 33 |
+
* 'empty'
|
| 34 |
+
* 'bytes'
|
| 35 |
+
* 'mixed'
|
| 36 |
+
* 'mixed-integer'
|
| 37 |
+
|
| 38 |
+
Returns
|
| 39 |
+
-------
|
| 40 |
+
inferred_dtype : str
|
| 41 |
+
The string for the inferred dtype from _libs.lib.infer_dtype
|
| 42 |
+
values : np.ndarray
|
| 43 |
+
An array of object dtype that will be inferred to have
|
| 44 |
+
`inferred_dtype`
|
| 45 |
+
|
| 46 |
+
Examples
|
| 47 |
+
--------
|
| 48 |
+
>>> from pandas._libs import lib
|
| 49 |
+
>>>
|
| 50 |
+
>>> def test_something(any_allowed_skipna_inferred_dtype):
|
| 51 |
+
... inferred_dtype, values = any_allowed_skipna_inferred_dtype
|
| 52 |
+
... # will pass
|
| 53 |
+
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
|
| 54 |
+
...
|
| 55 |
+
... # constructor for .str-accessor will also pass
|
| 56 |
+
... Series(values).str
|
| 57 |
+
"""
|
| 58 |
+
inferred_dtype, values = request.param
|
| 59 |
+
values = np.array(values, dtype=object) # object dtype to avoid casting
|
| 60 |
+
|
| 61 |
+
# correctness of inference tested in tests/dtypes/test_inference.py
|
| 62 |
+
return inferred_dtype, values
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def test_api(any_string_dtype):
|
| 66 |
+
# GH 6106, GH 9322
|
| 67 |
+
assert Series.str is StringMethods
|
| 68 |
+
assert isinstance(Series([""], dtype=any_string_dtype).str, StringMethods)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def test_api_mi_raises():
|
| 72 |
+
# GH 23679
|
| 73 |
+
mi = MultiIndex.from_arrays([["a", "b", "c"]])
|
| 74 |
+
msg = "Can only use .str accessor with Index, not MultiIndex"
|
| 75 |
+
with pytest.raises(AttributeError, match=msg):
|
| 76 |
+
mi.str
|
| 77 |
+
assert not hasattr(mi, "str")
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@pytest.mark.parametrize("dtype", [object, "category"])
def test_api_per_dtype(index_or_series, dtype, any_skipna_inferred_dtype):
    # one instance of parametrized fixture
    box = index_or_series
    inferred_dtype, values = any_skipna_inferred_dtype

    t = box(values, dtype=dtype)  # explicit dtype to avoid casting

    # Inferred dtypes for which constructing the .str accessor must succeed;
    # for every other inferred dtype the constructor must raise.
    types_passing_constructor = [
        "string",
        "unicode",
        "empty",
        "bytes",
        "mixed",
        "mixed-integer",
    ]
    if inferred_dtype in types_passing_constructor:
        # GH 6106
        assert isinstance(t.str, StringMethods)
    else:
        # GH 9184, GH 23011, GH 23163
        msg = "Can only use .str accessor with string values.*"
        with pytest.raises(AttributeError, match=msg):
            t.str
        assert not hasattr(t, "str")
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@pytest.mark.parametrize("dtype", [object, "category"])
def test_api_per_method(
    index_or_series,
    dtype,
    any_allowed_skipna_inferred_dtype,
    any_string_method,
    request,
):
    # this test does not check correctness of the different methods,
    # just that the methods work on the specified (inferred) dtypes,
    # and raise on all others
    box = index_or_series

    # one instance of each parametrized fixture
    inferred_dtype, values = any_allowed_skipna_inferred_dtype
    method_name, args, kwargs = any_string_method

    # Known-broken parameter combinations are marked xfail instead of being
    # skipped, so fixes become visible as XPASS. ``raises`` is only assigned
    # on the branches that also set ``reason``, so it is always bound when read.
    reason = None
    if box is Index and values.size == 0:
        if method_name in ["partition", "rpartition"] and kwargs.get("expand", True):
            raises = TypeError
            reason = "Method cannot deal with empty Index"
        elif method_name == "split" and kwargs.get("expand", None):
            raises = TypeError
            reason = "Split fails on empty Series when expand=True"
        elif method_name == "get_dummies":
            raises = ValueError
            reason = "Need to fortify get_dummies corner cases"

    elif (
        box is Index
        and inferred_dtype == "empty"
        and dtype == object
        and method_name == "get_dummies"
    ):
        raises = ValueError
        reason = "Need to fortify get_dummies corner cases"

    if reason is not None:
        mark = pytest.mark.xfail(raises=raises, reason=reason)
        request.applymarker(mark)

    t = box(values, dtype=dtype)  # explicit dtype to avoid casting
    method = getattr(t.str, method_name)

    bytes_allowed = method_name in ["decode", "get", "len", "slice"]
    # as of v0.23.4, all methods except 'cat' are very lenient with the
    # allowed data types, just returning NaN for entries that error.
    # This could be changed with an 'errors'-kwarg to the `str`-accessor,
    # see discussion in GH 13877
    mixed_allowed = method_name not in ["cat"]

    # list * bool multiplication: the optional entries are included only
    # when the corresponding flag is True.
    allowed_types = (
        ["string", "unicode", "empty"]
        + ["bytes"] * bytes_allowed
        + ["mixed", "mixed-integer"] * mixed_allowed
    )

    if inferred_dtype in allowed_types:
        # xref GH 23555, GH 23556
        with option_context("future.no_silent_downcasting", True):
            method(*args, **kwargs)  # works!
    else:
        # GH 23011, GH 23163
        msg = (
            f"Cannot use .str.{method_name} with values of "
            f"inferred dtype {repr(inferred_dtype)}."
        )
        with pytest.raises(TypeError, match=msg):
            method(*args, **kwargs)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def test_api_for_categorical(any_string_method, any_string_dtype):
    # https://github.com/pandas-dev/pandas/issues/10661
    # Every string method called on a categorical Series must match the
    # result of the same call on the equivalent object-dtype Series.
    s = Series(list("aabb"), dtype=any_string_dtype)
    s = s + " " + s
    c = s.astype("category")
    # re-cast the categories to object dtype — presumably so both code paths
    # compare like-for-like; TODO confirm against the original GH discussion
    c = c.astype(CategoricalDtype(c.dtype.categories.astype("object")))
    assert isinstance(c.str, StringMethods)

    method_name, args, kwargs = any_string_method

    result = getattr(c.str, method_name)(*args, **kwargs)
    expected = getattr(s.astype("object").str, method_name)(*args, **kwargs)

    # Dispatch on the result type: frame-returning methods (e.g. expand=True),
    # series-returning methods, and scalar-returning methods.
    if isinstance(result, DataFrame):
        tm.assert_frame_equal(result, expected)
    elif isinstance(result, Series):
        tm.assert_series_equal(result, expected)
    else:
        # str.cat(others=None) returns string, for example
        assert result == expected
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/test_case_justify.py
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
import operator
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from pandas import (
|
| 8 |
+
Series,
|
| 9 |
+
_testing as tm,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def test_title(any_string_dtype):
|
| 14 |
+
s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype)
|
| 15 |
+
result = s.str.title()
|
| 16 |
+
expected = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"], dtype=any_string_dtype)
|
| 17 |
+
tm.assert_series_equal(result, expected)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_title_mixed_object():
    # On an object Series, non-string entries (bool, datetime, numbers)
    # title-case to NaN while a literal None is preserved.
    ser = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
    expected = Series(
        ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan],
        dtype=object,
    )
    tm.assert_almost_equal(ser.str.title(), expected)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def test_lower_upper(any_string_dtype):
|
| 31 |
+
s = Series(["om", np.nan, "nom", "nom"], dtype=any_string_dtype)
|
| 32 |
+
|
| 33 |
+
result = s.str.upper()
|
| 34 |
+
expected = Series(["OM", np.nan, "NOM", "NOM"], dtype=any_string_dtype)
|
| 35 |
+
tm.assert_series_equal(result, expected)
|
| 36 |
+
|
| 37 |
+
result = result.str.lower()
|
| 38 |
+
tm.assert_series_equal(result, s)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_lower_upper_mixed_object():
    ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])

    # Only true strings are case-converted; other types map to NaN and a
    # literal None stays None.
    for method, expected_values in (
        ("upper", ["A", np.nan, "B", np.nan, np.nan, "FOO", None, np.nan, np.nan]),
        ("lower", ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan]),
    ):
        result = getattr(ser.str, method)()
        tm.assert_series_equal(result, Series(expected_values, dtype=object))
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@pytest.mark.parametrize(
    "data, expected",
    [
        (
            ["FOO", "BAR", np.nan, "Blah", "blurg"],
            ["Foo", "Bar", np.nan, "Blah", "Blurg"],
        ),
        (["a", "b", "c"], ["A", "B", "C"]),
        (["a b", "a bc. de"], ["A b", "A bc. de"]),
    ],
)
def test_capitalize(data, expected, any_string_dtype):
    # str.capitalize upper-cases only the first character of the whole
    # string (not each word — contrast with title) and lower-cases the rest;
    # NaN entries pass through untouched.
    s = Series(data, dtype=any_string_dtype)
    result = s.str.capitalize()
    expected = Series(expected, dtype=any_string_dtype)
    tm.assert_series_equal(result, expected)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def test_capitalize_mixed_object():
    # Non-string entries capitalize to NaN; a literal None is preserved.
    ser = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
    expected = Series(
        ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan],
        dtype=object,
    )
    tm.assert_series_equal(ser.str.capitalize(), expected)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def test_swapcase(any_string_dtype):
|
| 86 |
+
s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype)
|
| 87 |
+
result = s.str.swapcase()
|
| 88 |
+
expected = Series(["foo", "bar", np.nan, "bLAH", "BLURG"], dtype=any_string_dtype)
|
| 89 |
+
tm.assert_series_equal(result, expected)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def test_swapcase_mixed_object():
    ser = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0])
    # Non-string entries become NaN under swapcase; None is preserved as-is.
    expected = Series(
        ["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", None, np.nan, np.nan],
        dtype=object,
    )
    tm.assert_series_equal(ser.str.swapcase(), expected)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def test_casefold():
    # GH25405
    # casefold performs full Unicode case folding, e.g. German sharp-s
    # "ß" folds to "ss" (which plain lower() would not do).
    ser = Series(["ß", np.nan, "case", "ßd"])
    expected = Series(["ss", np.nan, "case", "ssd"])
    tm.assert_series_equal(ser.str.casefold(), expected)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_casemethods(any_string_dtype):
|
| 112 |
+
values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"]
|
| 113 |
+
s = Series(values, dtype=any_string_dtype)
|
| 114 |
+
assert s.str.lower().tolist() == [v.lower() for v in values]
|
| 115 |
+
assert s.str.upper().tolist() == [v.upper() for v in values]
|
| 116 |
+
assert s.str.title().tolist() == [v.title() for v in values]
|
| 117 |
+
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
|
| 118 |
+
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def test_pad(any_string_dtype):
|
| 122 |
+
s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)
|
| 123 |
+
|
| 124 |
+
result = s.str.pad(5, side="left")
|
| 125 |
+
expected = Series(
|
| 126 |
+
[" a", " b", np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 127 |
+
)
|
| 128 |
+
tm.assert_series_equal(result, expected)
|
| 129 |
+
|
| 130 |
+
result = s.str.pad(5, side="right")
|
| 131 |
+
expected = Series(
|
| 132 |
+
["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 133 |
+
)
|
| 134 |
+
tm.assert_series_equal(result, expected)
|
| 135 |
+
|
| 136 |
+
result = s.str.pad(5, side="both")
|
| 137 |
+
expected = Series(
|
| 138 |
+
[" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 139 |
+
)
|
| 140 |
+
tm.assert_series_equal(result, expected)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def test_pad_mixed_object():
    ser = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0])

    # Only string entries are padded; other types become NaN, None is kept.
    for side, expected_values in (
        ("left", ["    a", np.nan, "    b", np.nan, np.nan, "   ee", None, np.nan, np.nan]),
        ("right", ["a    ", np.nan, "b    ", np.nan, np.nan, "ee   ", None, np.nan, np.nan]),
        ("both", ["  a  ", np.nan, "  b  ", np.nan, np.nan, "  ee ", None, np.nan, np.nan]),
    ):
        result = ser.str.pad(5, side=side)
        tm.assert_series_equal(result, Series(expected_values, dtype=object))
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def test_pad_fillchar(any_string_dtype):
|
| 169 |
+
s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)
|
| 170 |
+
|
| 171 |
+
result = s.str.pad(5, side="left", fillchar="X")
|
| 172 |
+
expected = Series(
|
| 173 |
+
["XXXXa", "XXXXb", np.nan, "XXXXc", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 174 |
+
)
|
| 175 |
+
tm.assert_series_equal(result, expected)
|
| 176 |
+
|
| 177 |
+
result = s.str.pad(5, side="right", fillchar="X")
|
| 178 |
+
expected = Series(
|
| 179 |
+
["aXXXX", "bXXXX", np.nan, "cXXXX", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 180 |
+
)
|
| 181 |
+
tm.assert_series_equal(result, expected)
|
| 182 |
+
|
| 183 |
+
result = s.str.pad(5, side="both", fillchar="X")
|
| 184 |
+
expected = Series(
|
| 185 |
+
["XXaXX", "XXbXX", np.nan, "XXcXX", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 186 |
+
)
|
| 187 |
+
tm.assert_series_equal(result, expected)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def test_pad_fillchar_bad_arg_raises(any_string_dtype):
    s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)

    # fillchar must be a single character: a multi-character string raises,
    # and the message names the offending type ...
    msg = "fillchar must be a character, not str"
    with pytest.raises(TypeError, match=msg):
        s.str.pad(5, fillchar="XY")

    # ... as does a non-string argument.
    msg = "fillchar must be a character, not int"
    with pytest.raises(TypeError, match=msg):
        s.str.pad(5, fillchar=5)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
@pytest.mark.parametrize("method_name", ["center", "ljust", "rjust", "zfill", "pad"])
def test_pad_width_bad_arg_raises(method_name, any_string_dtype):
    # see gh-13598
    s = Series(["1", "22", "a", "bb"], dtype=any_string_dtype)
    # methodcaller builds a call like ``s.str.center("f")`` — i.e. the width
    # argument is a non-integer — for each padding method under test.
    op = operator.methodcaller(method_name, "f")

    msg = "width must be of integer type, not str"
    with pytest.raises(TypeError, match=msg):
        op(s.str)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def test_center_ljust_rjust(any_string_dtype):
|
| 214 |
+
s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)
|
| 215 |
+
|
| 216 |
+
result = s.str.center(5)
|
| 217 |
+
expected = Series(
|
| 218 |
+
[" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 219 |
+
)
|
| 220 |
+
tm.assert_series_equal(result, expected)
|
| 221 |
+
|
| 222 |
+
result = s.str.ljust(5)
|
| 223 |
+
expected = Series(
|
| 224 |
+
["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 225 |
+
)
|
| 226 |
+
tm.assert_series_equal(result, expected)
|
| 227 |
+
|
| 228 |
+
result = s.str.rjust(5)
|
| 229 |
+
expected = Series(
|
| 230 |
+
[" a", " b", np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype
|
| 231 |
+
)
|
| 232 |
+
tm.assert_series_equal(result, expected)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def test_center_ljust_rjust_mixed_object():
    ser = Series(["a", np.nan, "b", True, datetime.today(), "c", "eee", None, 1, 2.0])

    # Strings are justified to width 5; non-strings become NaN, None is kept.
    for method, expected_values in (
        (
            "center",
            ["  a  ", np.nan, "  b  ", np.nan, np.nan, "  c  ", " eee ", None, np.nan, np.nan],
        ),
        (
            "ljust",
            ["a    ", np.nan, "b    ", np.nan, np.nan, "c    ", "eee  ", None, np.nan, np.nan],
        ),
        (
            "rjust",
            ["    a", np.nan, "    b", np.nan, np.nan, "    c", "  eee", None, np.nan, np.nan],
        ),
    ):
        result = getattr(ser.str, method)(5)
        tm.assert_series_equal(result, Series(expected_values, dtype=object))
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def test_center_ljust_rjust_fillchar(any_string_dtype):
    # Arrow-backed strings justify differently, so this dtype is skipped
    # (not xfailed) — see the linked review comment for the rationale.
    if any_string_dtype == "string[pyarrow_numpy]":
        pytest.skip(
            "Arrow logic is different, "
            "see https://github.com/pandas-dev/pandas/pull/54533/files#r1299808126",
        )
    s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype)

    # For each method, compare against both a hand-written expectation and
    # the equivalent built-in str method applied element-wise.
    result = s.str.center(5, fillchar="X")
    expected = Series(
        ["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype
    )
    tm.assert_series_equal(result, expected)
    expected = np.array([v.center(5, "X") for v in np.array(s)], dtype=np.object_)
    tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)

    result = s.str.ljust(5, fillchar="X")
    expected = Series(
        ["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"], dtype=any_string_dtype
    )
    tm.assert_series_equal(result, expected)
    expected = np.array([v.ljust(5, "X") for v in np.array(s)], dtype=np.object_)
    tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)

    result = s.str.rjust(5, fillchar="X")
    expected = Series(
        ["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype
    )
    tm.assert_series_equal(result, expected)
    expected = np.array([v.rjust(5, "X") for v in np.array(s)], dtype=np.object_)
    tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def test_center_ljust_rjust_fillchar_bad_arg_raises(any_string_dtype):
    s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype)

    # If fillchar is not a character, normal str raises TypeError
    # 'aaa'.ljust(5, 'XY')
    # TypeError: must be char, not str
    template = "fillchar must be a character, not {dtype}"

    # Multi-character string fillchar: rejected by all three justify methods.
    with pytest.raises(TypeError, match=template.format(dtype="str")):
        s.str.center(5, fillchar="XY")

    with pytest.raises(TypeError, match=template.format(dtype="str")):
        s.str.ljust(5, fillchar="XY")

    with pytest.raises(TypeError, match=template.format(dtype="str")):
        s.str.rjust(5, fillchar="XY")

    # Non-string fillchar: likewise rejected; the message names the bad type.
    with pytest.raises(TypeError, match=template.format(dtype="int")):
        s.str.center(5, fillchar=1)

    with pytest.raises(TypeError, match=template.format(dtype="int")):
        s.str.ljust(5, fillchar=1)

    with pytest.raises(TypeError, match=template.format(dtype="int")):
        s.str.rjust(5, fillchar=1)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def test_zfill(any_string_dtype):
|
| 354 |
+
s = Series(["1", "22", "aaa", "333", "45678"], dtype=any_string_dtype)
|
| 355 |
+
|
| 356 |
+
result = s.str.zfill(5)
|
| 357 |
+
expected = Series(
|
| 358 |
+
["00001", "00022", "00aaa", "00333", "45678"], dtype=any_string_dtype
|
| 359 |
+
)
|
| 360 |
+
tm.assert_series_equal(result, expected)
|
| 361 |
+
expected = np.array([v.zfill(5) for v in np.array(s)], dtype=np.object_)
|
| 362 |
+
tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)
|
| 363 |
+
|
| 364 |
+
result = s.str.zfill(3)
|
| 365 |
+
expected = Series(["001", "022", "aaa", "333", "45678"], dtype=any_string_dtype)
|
| 366 |
+
tm.assert_series_equal(result, expected)
|
| 367 |
+
expected = np.array([v.zfill(3) for v in np.array(s)], dtype=np.object_)
|
| 368 |
+
tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)
|
| 369 |
+
|
| 370 |
+
s = Series(["1", np.nan, "aaa", np.nan, "45678"], dtype=any_string_dtype)
|
| 371 |
+
result = s.str.zfill(5)
|
| 372 |
+
expected = Series(
|
| 373 |
+
["00001", np.nan, "00aaa", np.nan, "45678"], dtype=any_string_dtype
|
| 374 |
+
)
|
| 375 |
+
tm.assert_series_equal(result, expected)
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def test_wrap(any_string_dtype):
|
| 379 |
+
# test values are: two words less than width, two words equal to width,
|
| 380 |
+
# two words greater than width, one word less than width, one word
|
| 381 |
+
# equal to width, one word greater than width, multiple tokens with
|
| 382 |
+
# trailing whitespace equal to width
|
| 383 |
+
s = Series(
|
| 384 |
+
[
|
| 385 |
+
"hello world",
|
| 386 |
+
"hello world!",
|
| 387 |
+
"hello world!!",
|
| 388 |
+
"abcdefabcde",
|
| 389 |
+
"abcdefabcdef",
|
| 390 |
+
"abcdefabcdefa",
|
| 391 |
+
"ab ab ab ab ",
|
| 392 |
+
"ab ab ab ab a",
|
| 393 |
+
"\t",
|
| 394 |
+
],
|
| 395 |
+
dtype=any_string_dtype,
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
# expected values
|
| 399 |
+
expected = Series(
|
| 400 |
+
[
|
| 401 |
+
"hello world",
|
| 402 |
+
"hello world!",
|
| 403 |
+
"hello\nworld!!",
|
| 404 |
+
"abcdefabcde",
|
| 405 |
+
"abcdefabcdef",
|
| 406 |
+
"abcdefabcdef\na",
|
| 407 |
+
"ab ab ab ab",
|
| 408 |
+
"ab ab ab ab\na",
|
| 409 |
+
"",
|
| 410 |
+
],
|
| 411 |
+
dtype=any_string_dtype,
|
| 412 |
+
)
|
| 413 |
+
|
| 414 |
+
result = s.str.wrap(12, break_long_words=True)
|
| 415 |
+
tm.assert_series_equal(result, expected)
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def test_wrap_unicode(any_string_dtype):
|
| 419 |
+
# test with pre and post whitespace (non-unicode), NaN, and non-ascii Unicode
|
| 420 |
+
s = Series(
|
| 421 |
+
[" pre ", np.nan, "\xac\u20ac\U00008000 abadcafe"], dtype=any_string_dtype
|
| 422 |
+
)
|
| 423 |
+
expected = Series(
|
| 424 |
+
[" pre", np.nan, "\xac\u20ac\U00008000 ab\nadcafe"], dtype=any_string_dtype
|
| 425 |
+
)
|
| 426 |
+
result = s.str.wrap(6)
|
| 427 |
+
tm.assert_series_equal(result, expected)
|
llava_next/lib/python3.10/site-packages/pandas/tests/strings/test_cat.py
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas.util._test_decorators as td
|
| 7 |
+
|
| 8 |
+
from pandas import (
|
| 9 |
+
DataFrame,
|
| 10 |
+
Index,
|
| 11 |
+
MultiIndex,
|
| 12 |
+
Series,
|
| 13 |
+
_testing as tm,
|
| 14 |
+
concat,
|
| 15 |
+
option_context,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@pytest.mark.parametrize("other", [None, Series, Index])
def test_str_cat_name(index_or_series, other):
    # GH 21053: the caller's ``name`` must survive str.cat regardless of
    # whether ``others`` is a Series, an Index, or a plain list.
    box = index_or_series
    values = ["a", "b"]
    if other:
        # wrap the raw values in the requested container type
        other = other(values)
    else:
        other = values
    result = box(values, name="name").str.cat(other, sep=",")
    assert result.name == "name"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@pytest.mark.parametrize(
    "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
def test_str_cat(index_or_series, infer_string):
    with option_context("future.infer_string", infer_string):
        box = index_or_series
        # test_cat above tests "str_cat" from ndarray;
        # here testing "str.cat" from Series/Index to ndarray/list
        s = box(["a", "a", "b", "b", "c", np.nan])

        # single array: with no na_rep, NaN entries are simply dropped
        result = s.str.cat()
        expected = "aabbc"
        assert result == expected

        # na_rep substitutes for NaN entries in the concatenation
        result = s.str.cat(na_rep="-")
        expected = "aabbc-"
        assert result == expected

        result = s.str.cat(sep="_", na_rep="NA")
        expected = "a_a_b_b_c_NA"
        assert result == expected

        # NaN on either side (caller or others) becomes na_rep
        t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)
        expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])

        # Series/Index with array
        result = s.str.cat(t, na_rep="-")
        tm.assert_equal(result, expected)

        # Series/Index with list
        result = s.str.cat(list(t), na_rep="-")
        tm.assert_equal(result, expected)

        # errors for incorrect lengths
        rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
        z = Series(["1", "2", "3"])

        with pytest.raises(ValueError, match=rgx):
            s.str.cat(z.values)

        with pytest.raises(ValueError, match=rgx):
            s.str.cat(list(z))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_str_cat_raises_intuitive_error(index_or_series):
    # GH 11334: passing a bare string as ``others`` is almost always a
    # mistaken attempt to pass a separator — the error should say so.
    box = index_or_series
    s = box(["a", "b", "c", "d"])
    message = "Did you mean to supply a `sep` keyword?"
    with pytest.raises(ValueError, match=message):
        s.str.cat("|")
    with pytest.raises(ValueError, match=message):
        s.str.cat(" ")
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@pytest.mark.parametrize(
    "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
@pytest.mark.parametrize("sep", ["", None])
@pytest.mark.parametrize("dtype_target", ["object", "category"])
@pytest.mark.parametrize("dtype_caller", ["object", "category"])
def test_str_cat_categorical(
    index_or_series, dtype_caller, dtype_target, sep, infer_string
):
    box = index_or_series

    with option_context("future.infer_string", infer_string):
        s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
        s = s if box == Index else Series(s, index=s, dtype=s.dtype)
        t = Index(["b", "a", "b", "c"], dtype=dtype_target)

        # dtype=None lets the constructor infer the dtype for the
        # categorical-caller case
        expected = Index(
            ["ab", "aa", "bb", "ac"], dtype=object if dtype_caller == "object" else None
        )
        expected = (
            expected
            if box == Index
            else Series(
                expected, index=Index(s, dtype=dtype_caller), dtype=expected.dtype
            )
        )

        # Series/Index with unaligned Index -> t.values
        result = s.str.cat(t.values, sep=sep)
        tm.assert_equal(result, expected)

        # Series/Index with Series having matching Index
        t = Series(t.values, index=Index(s, dtype=dtype_caller))
        result = s.str.cat(t, sep=sep)
        tm.assert_equal(result, expected)

        # Series/Index with Series.values
        result = s.str.cat(t.values, sep=sep)
        tm.assert_equal(result, expected)

        # Series/Index with Series having different Index: alignment joins
        # on index values, so the result has one row per index match
        t = Series(t.values, index=t.values)
        expected = Index(
            ["aa", "aa", "bb", "bb", "aa"],
            dtype=object if dtype_caller == "object" else None,
        )
        dtype = object if dtype_caller == "object" else s.dtype.categories.dtype
        expected = (
            expected
            if box == Index
            else Series(
                expected,
                index=Index(expected.str[:1], dtype=dtype),
                dtype=expected.dtype,
            )
        )

        result = s.str.cat(t, sep=sep)
        tm.assert_equal(result, expected)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@pytest.mark.parametrize(
    "data",
    [[1, 2, 3], [0.1, 0.2, 0.3], [1, 2, "b"]],
    ids=["integers", "floats", "mixed"],
)
# without dtype=object, np.array would cast [1, 2, 'b'] to ['1', '2', 'b']
@pytest.mark.parametrize(
    "box",
    [Series, Index, list, lambda x: np.array(x, dtype=object)],
    ids=["Series", "Index", "list", "np.array"],
)
def test_str_cat_wrong_dtype_raises(box, data):
    """str.cat must raise TypeError for others with non-string contents.

    GH 22722
    """
    caller = Series(["a", "b", "c"])
    others = box(data)

    expected_msg = "Concatenation requires list-likes containing only strings.*"
    # join="outer" plus na_rep is required, as otherwise Index would not raise
    with pytest.raises(TypeError, match=expected_msg):
        caller.str.cat(others, join="outer", na_rep="-")
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def test_str_cat_mixed_inputs(index_or_series):
    """Exercise str.cat with DataFrame, 2-dim ndarray, and (mixed) lists of
    others, for both Series and Index callers, including error cases for
    wrong lengths and forbidden input types.

    NOTE: this test mutates ``t.index`` and ``d.index`` partway through, so
    the statement order matters.
    """
    box = index_or_series
    s = Index(["a", "b", "c", "d"])
    s = s if box == Index else Series(s, index=s)

    # t shares s's values as its index, so it aligns 1:1 with s
    t = Series(["A", "B", "C", "D"], index=s.values)
    d = concat([t, Series(s, index=s)], axis=1)

    expected = Index(["aAa", "bBb", "cCc", "dDd"])
    expected = expected if box == Index else Series(expected.values, index=s.values)

    # Series/Index with DataFrame
    result = s.str.cat(d)
    tm.assert_equal(result, expected)

    # Series/Index with two-dimensional ndarray
    result = s.str.cat(d.values)
    tm.assert_equal(result, expected)

    # Series/Index with list of Series
    result = s.str.cat([t, s])
    tm.assert_equal(result, expected)

    # Series/Index with mixed list of Series/array
    result = s.str.cat([t, s.values])
    tm.assert_equal(result, expected)

    # Series/Index with list of Series; different indexes
    t.index = ["b", "c", "d", "a"]
    expected = box(["aDa", "bAb", "cBc", "dCd"])
    expected = expected if box == Index else Series(expected.values, index=s.values)
    result = s.str.cat([t, s])
    tm.assert_equal(result, expected)

    # Series/Index with mixed list; different index
    result = s.str.cat([t, s.values])
    tm.assert_equal(result, expected)

    # Series/Index with DataFrame; different indexes
    d.index = ["b", "c", "d", "a"]
    expected = box(["aDd", "bAa", "cBb", "dCc"])
    expected = expected if box == Index else Series(expected.values, index=s.values)
    result = s.str.cat(d)
    tm.assert_equal(result, expected)

    # errors for incorrect lengths
    rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
    z = Series(["1", "2", "3"])  # deliberately length 3 vs s's length 4
    e = concat([z, z], axis=1)

    # two-dimensional ndarray
    with pytest.raises(ValueError, match=rgx):
        s.str.cat(e.values)

    # list of list-likes
    with pytest.raises(ValueError, match=rgx):
        s.str.cat([z.values, s.values])

    # mixed list of Series/list-like
    with pytest.raises(ValueError, match=rgx):
        s.str.cat([z.values, s])

    # errors for incorrect arguments in list-like
    rgx = "others must be Series, Index, DataFrame,.*"
    # make sure None/NaN do not crash checks in _get_series_list
    u = Series(["a", np.nan, "c", None])

    # mix of string and Series
    with pytest.raises(TypeError, match=rgx):
        s.str.cat([u, "u"])

    # DataFrame in list
    with pytest.raises(TypeError, match=rgx):
        s.str.cat([u, d])

    # 2-dim ndarray in list
    with pytest.raises(TypeError, match=rgx):
        s.str.cat([u, d.values])

    # nested lists
    with pytest.raises(TypeError, match=rgx):
        s.str.cat([u, [u, d]])

    # forbidden input type: set
    # GH 23009
    with pytest.raises(TypeError, match=rgx):
        s.str.cat(set(u))

    # forbidden input type: set in list
    # GH 23009
    with pytest.raises(TypeError, match=rgx):
        s.str.cat([u, set(u)])

    # other forbidden input type, e.g. int
    with pytest.raises(TypeError, match=rgx):
        s.str.cat(1)

    # nested list-likes
    with pytest.raises(TypeError, match=rgx):
        s.str.cat(iter([t.values, list(s)]))
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"])
|
| 274 |
+
def test_str_cat_align_indexed(index_or_series, join):
|
| 275 |
+
# https://github.com/pandas-dev/pandas/issues/18657
|
| 276 |
+
box = index_or_series
|
| 277 |
+
|
| 278 |
+
s = Series(["a", "b", "c", "d"], index=["a", "b", "c", "d"])
|
| 279 |
+
t = Series(["D", "A", "E", "B"], index=["d", "a", "e", "b"])
|
| 280 |
+
sa, ta = s.align(t, join=join)
|
| 281 |
+
# result after manual alignment of inputs
|
| 282 |
+
expected = sa.str.cat(ta, na_rep="-")
|
| 283 |
+
|
| 284 |
+
if box == Index:
|
| 285 |
+
s = Index(s)
|
| 286 |
+
sa = Index(sa)
|
| 287 |
+
expected = Index(expected)
|
| 288 |
+
|
| 289 |
+
result = s.str.cat(t, join=join, na_rep="-")
|
| 290 |
+
tm.assert_equal(result, expected)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"])
def test_str_cat_align_mixed_inputs(join):
    """Check alignment of str.cat when others mixes indexed objects
    (Series/DataFrame) with unindexed ones (ndarray), for every join mode,
    plus length-mismatch errors for unindexed others.
    """
    s = Series(["a", "b", "c", "d"])
    t = Series(["d", "a", "e", "b"], index=[3, 0, 4, 1])
    d = concat([t, t], axis=1)

    # expected result for join="outer"; slice it down for the other joins
    expected_outer = Series(["aaa", "bbb", "c--", "ddd", "-ee"])
    expected = expected_outer.loc[s.index.join(t.index, how=join)]

    # list of Series
    result = s.str.cat([t, t], join=join, na_rep="-")
    tm.assert_series_equal(result, expected)

    # DataFrame
    result = s.str.cat(d, join=join, na_rep="-")
    tm.assert_series_equal(result, expected)

    # mixed list of indexed/unindexed
    u = np.array(["A", "B", "C", "D"])
    expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"])
    # joint index of rhs [t, u]; u will be forced to have the index of s
    rhs_idx = (
        t.index.intersection(s.index)
        if join == "inner"
        else t.index.union(s.index)
        if join == "outer"
        else t.index.append(s.index.difference(t.index))
    )

    expected = expected_outer.loc[s.index.join(rhs_idx, how=join)]
    result = s.str.cat([t, u], join=join, na_rep="-")
    tm.assert_series_equal(result, expected)

    with pytest.raises(TypeError, match="others must be Series,.*"):
        # nested lists are forbidden
        s.str.cat([t, list(u)], join=join)

    # errors for incorrect lengths
    rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
    z = Series(["1", "2", "3"]).values  # length 3 vs s's length 4

    # unindexed object of wrong length
    with pytest.raises(ValueError, match=rgx):
        s.str.cat(z, join=join)

    # unindexed object of wrong length in list
    with pytest.raises(ValueError, match=rgx):
        s.str.cat([t, z], join=join)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def test_str_cat_all_na(index_or_series, index_or_series2):
|
| 344 |
+
# GH 24044
|
| 345 |
+
box = index_or_series
|
| 346 |
+
other = index_or_series2
|
| 347 |
+
|
| 348 |
+
# check that all NaNs in caller / target work
|
| 349 |
+
s = Index(["a", "b", "c", "d"])
|
| 350 |
+
s = s if box == Index else Series(s, index=s)
|
| 351 |
+
t = other([np.nan] * 4, dtype=object)
|
| 352 |
+
# add index of s for alignment
|
| 353 |
+
t = t if other == Index else Series(t, index=s)
|
| 354 |
+
|
| 355 |
+
# all-NA target
|
| 356 |
+
if box == Series:
|
| 357 |
+
expected = Series([np.nan] * 4, index=s.index, dtype=s.dtype)
|
| 358 |
+
else: # box == Index
|
| 359 |
+
# TODO: Strimg option, this should return string dtype
|
| 360 |
+
expected = Index([np.nan] * 4, dtype=object)
|
| 361 |
+
result = s.str.cat(t, join="left")
|
| 362 |
+
tm.assert_equal(result, expected)
|
| 363 |
+
|
| 364 |
+
# all-NA caller (only for Series)
|
| 365 |
+
if other == Series:
|
| 366 |
+
expected = Series([np.nan] * 4, dtype=object, index=t.index)
|
| 367 |
+
result = t.str.cat(s, join="left")
|
| 368 |
+
tm.assert_series_equal(result, expected)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def test_str_cat_special_cases():
    """Two corner cases of str.cat: an iterator mixing Series and ndarray,
    and join="right" where the others carry differing indexes."""
    caller = Series(["a", "b", "c", "d"])
    shifted = Series(["d", "a", "e", "b"], index=[3, 0, 4, 1])

    # iterator of elements with different types
    result = caller.str.cat(iter([shifted, caller.values]), join="outer", na_rep="-")
    tm.assert_series_equal(result, Series(["aaa", "bbb", "c-c", "ddd", "-e-"]))

    # right-align with different indexes in others
    result = caller.str.cat(
        [shifted.loc[[0]], shifted.loc[[3]]], join="right", na_rep="-"
    )
    tm.assert_series_equal(result, Series(["aa-", "d-d"], index=[0, 3]))
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def test_cat_on_filtered_index():
    """str.cat still works on Series whose rows were filtered out of a frame,
    leaving a non-contiguous index."""
    idx = MultiIndex.from_product([[2011, 2012], [1, 2, 3]], names=["year", "month"])
    frame = DataFrame(index=idx).reset_index()
    frame = frame[frame.month > 1]

    year_str = frame.year.astype("str")
    month_str = frame.month.astype("str")

    # single other
    both = year_str.str.cat(month_str, sep=" ")
    assert both.loc[1] == "2011 2"

    # list of others
    multiple = year_str.str.cat([month_str, month_str], sep=" ")
    assert multiple.loc[1] == "2011 2 2"
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
@pytest.mark.parametrize("klass", [tuple, list, np.array, Series, Index])
def test_cat_different_classes(klass):
    """str.cat accepts others of assorted list-like classes.

    https://github.com/pandas-dev/pandas/issues/33425
    """
    caller = Series(["a", "b", "c"])
    others = klass(["x", "y", "z"])
    expected = Series(["ax", "by", "cz"])
    tm.assert_series_equal(caller.str.cat(others), expected)
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def test_cat_on_series_dot_str():
    """Passing a StringMethods accessor as others must raise TypeError.

    GH 28277
    """
    series = Series(["AbC", "de", "FGHI", "j", "kLLLm"])

    expected_msg = re.escape(
        "others must be Series, Index, DataFrame, np.ndarray "
        "or list-like (either containing only strings or "
        "containing only objects of type Series/Index/"
        "np.ndarray[1-dim])"
    )
    with pytest.raises(TypeError, match=expected_msg):
        series.str.cat(others=series.str)
|