Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/constructors/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__init__.py +7 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_compare.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_convert_dtypes.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_duplicated.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_infer_objects.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_quantile.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reorder_levels.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_set_axis.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_truncate.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_add_prefix_suffix.py +49 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_align.py +435 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_asfreq.py +213 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_asof.py +197 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_assign.py +84 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_at_time.py +124 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_between_time.py +217 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_combine.py +47 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_combine_first.py +540 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py +169 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_copy.py +64 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_count.py +39 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_cov_corr.py +433 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_diff.py +304 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_dot.py +131 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_drop.py +537 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_drop_duplicates.py +473 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_droplevel.py +36 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_dropna.py +285 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_dtypes.py +148 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_duplicated.py +113 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_equals.py +83 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_explode.py +303 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_fillna.py +778 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_filter.py +139 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_first_and_last.py +97 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_first_valid_index.py +74 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_get_numeric_data.py +102 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_head_tail.py +57 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_infer_objects.py +42 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_interpolate.py +422 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_is_homogeneous_dtype.py +57 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_isetitem.py +37 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_join.py +571 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_matmul.py +86 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_pct_change.py +121 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_pipe.py +39 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_pop.py +71 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_quantile.py +989 -0
- videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_rank.py +493 -0
videochat2/lib/python3.10/site-packages/pandas/tests/frame/constructors/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test files dedicated to individual (stand-alone) DataFrame methods
|
| 3 |
+
|
| 4 |
+
Ideally these files/tests should correspond 1-to-1 with tests.series.methods
|
| 5 |
+
|
| 6 |
+
These may also present opportunities for sharing/de-duplicating test code.
|
| 7 |
+
"""
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_compare.cpython-310.pyc
ADDED
|
Binary file (6.71 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_convert_dtypes.cpython-310.pyc
ADDED
|
Binary file (5.34 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_duplicated.cpython-310.pyc
ADDED
|
Binary file (3.13 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_infer_objects.cpython-310.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_quantile.cpython-310.pyc
ADDED
|
Binary file (23.5 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reorder_levels.cpython-310.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_set_axis.cpython-310.pyc
ADDED
|
Binary file (4.79 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_truncate.cpython-310.pyc
ADDED
|
Binary file (4.89 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_add_prefix_suffix.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import Index
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_add_prefix_suffix(float_frame):
|
| 8 |
+
with_prefix = float_frame.add_prefix("foo#")
|
| 9 |
+
expected = Index([f"foo#{c}" for c in float_frame.columns])
|
| 10 |
+
tm.assert_index_equal(with_prefix.columns, expected)
|
| 11 |
+
|
| 12 |
+
with_suffix = float_frame.add_suffix("#foo")
|
| 13 |
+
expected = Index([f"{c}#foo" for c in float_frame.columns])
|
| 14 |
+
tm.assert_index_equal(with_suffix.columns, expected)
|
| 15 |
+
|
| 16 |
+
with_pct_prefix = float_frame.add_prefix("%")
|
| 17 |
+
expected = Index([f"%{c}" for c in float_frame.columns])
|
| 18 |
+
tm.assert_index_equal(with_pct_prefix.columns, expected)
|
| 19 |
+
|
| 20 |
+
with_pct_suffix = float_frame.add_suffix("%")
|
| 21 |
+
expected = Index([f"{c}%" for c in float_frame.columns])
|
| 22 |
+
tm.assert_index_equal(with_pct_suffix.columns, expected)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def test_add_prefix_suffix_axis(float_frame):
|
| 26 |
+
# GH 47819
|
| 27 |
+
with_prefix = float_frame.add_prefix("foo#", axis=0)
|
| 28 |
+
expected = Index([f"foo#{c}" for c in float_frame.index])
|
| 29 |
+
tm.assert_index_equal(with_prefix.index, expected)
|
| 30 |
+
|
| 31 |
+
with_prefix = float_frame.add_prefix("foo#", axis=1)
|
| 32 |
+
expected = Index([f"foo#{c}" for c in float_frame.columns])
|
| 33 |
+
tm.assert_index_equal(with_prefix.columns, expected)
|
| 34 |
+
|
| 35 |
+
with_pct_suffix = float_frame.add_suffix("#foo", axis=0)
|
| 36 |
+
expected = Index([f"{c}#foo" for c in float_frame.index])
|
| 37 |
+
tm.assert_index_equal(with_pct_suffix.index, expected)
|
| 38 |
+
|
| 39 |
+
with_pct_suffix = float_frame.add_suffix("#foo", axis=1)
|
| 40 |
+
expected = Index([f"{c}#foo" for c in float_frame.columns])
|
| 41 |
+
tm.assert_index_equal(with_pct_suffix.columns, expected)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def test_add_prefix_suffix_invalid_axis(float_frame):
|
| 45 |
+
with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"):
|
| 46 |
+
float_frame.add_prefix("foo#", axis=2)
|
| 47 |
+
|
| 48 |
+
with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"):
|
| 49 |
+
float_frame.add_suffix("foo#", axis=2)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_align.py
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import timezone
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from pandas import (
|
| 8 |
+
DataFrame,
|
| 9 |
+
Index,
|
| 10 |
+
Series,
|
| 11 |
+
date_range,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TestDataFrameAlign:
|
| 17 |
+
def test_frame_align_aware(self):
|
| 18 |
+
idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern")
|
| 19 |
+
idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern")
|
| 20 |
+
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
|
| 21 |
+
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
|
| 22 |
+
new1, new2 = df1.align(df2)
|
| 23 |
+
assert df1.index.tz == new1.index.tz
|
| 24 |
+
assert df2.index.tz == new2.index.tz
|
| 25 |
+
|
| 26 |
+
# different timezones convert to UTC
|
| 27 |
+
|
| 28 |
+
# frame with frame
|
| 29 |
+
df1_central = df1.tz_convert("US/Central")
|
| 30 |
+
new1, new2 = df1.align(df1_central)
|
| 31 |
+
assert new1.index.tz is timezone.utc
|
| 32 |
+
assert new2.index.tz is timezone.utc
|
| 33 |
+
|
| 34 |
+
# frame with Series
|
| 35 |
+
new1, new2 = df1.align(df1_central[0], axis=0)
|
| 36 |
+
assert new1.index.tz is timezone.utc
|
| 37 |
+
assert new2.index.tz is timezone.utc
|
| 38 |
+
|
| 39 |
+
df1[0].align(df1_central, axis=0)
|
| 40 |
+
assert new1.index.tz is timezone.utc
|
| 41 |
+
assert new2.index.tz is timezone.utc
|
| 42 |
+
|
| 43 |
+
def test_align_float(self, float_frame, using_copy_on_write):
|
| 44 |
+
af, bf = float_frame.align(float_frame)
|
| 45 |
+
assert af._mgr is not float_frame._mgr
|
| 46 |
+
|
| 47 |
+
af, bf = float_frame.align(float_frame, copy=False)
|
| 48 |
+
if not using_copy_on_write:
|
| 49 |
+
assert af._mgr is float_frame._mgr
|
| 50 |
+
else:
|
| 51 |
+
assert af._mgr is not float_frame._mgr
|
| 52 |
+
|
| 53 |
+
# axis = 0
|
| 54 |
+
other = float_frame.iloc[:-5, :3]
|
| 55 |
+
af, bf = float_frame.align(other, axis=0, fill_value=-1)
|
| 56 |
+
|
| 57 |
+
tm.assert_index_equal(bf.columns, other.columns)
|
| 58 |
+
|
| 59 |
+
# test fill value
|
| 60 |
+
join_idx = float_frame.index.join(other.index)
|
| 61 |
+
diff_a = float_frame.index.difference(join_idx)
|
| 62 |
+
diff_a_vals = af.reindex(diff_a).values
|
| 63 |
+
assert (diff_a_vals == -1).all()
|
| 64 |
+
|
| 65 |
+
af, bf = float_frame.align(other, join="right", axis=0)
|
| 66 |
+
tm.assert_index_equal(bf.columns, other.columns)
|
| 67 |
+
tm.assert_index_equal(bf.index, other.index)
|
| 68 |
+
tm.assert_index_equal(af.index, other.index)
|
| 69 |
+
|
| 70 |
+
# axis = 1
|
| 71 |
+
other = float_frame.iloc[:-5, :3].copy()
|
| 72 |
+
af, bf = float_frame.align(other, axis=1)
|
| 73 |
+
tm.assert_index_equal(bf.columns, float_frame.columns)
|
| 74 |
+
tm.assert_index_equal(bf.index, other.index)
|
| 75 |
+
|
| 76 |
+
# test fill value
|
| 77 |
+
join_idx = float_frame.index.join(other.index)
|
| 78 |
+
diff_a = float_frame.index.difference(join_idx)
|
| 79 |
+
diff_a_vals = af.reindex(diff_a).values
|
| 80 |
+
|
| 81 |
+
assert (diff_a_vals == -1).all()
|
| 82 |
+
|
| 83 |
+
af, bf = float_frame.align(other, join="inner", axis=1)
|
| 84 |
+
tm.assert_index_equal(bf.columns, other.columns)
|
| 85 |
+
|
| 86 |
+
af, bf = float_frame.align(other, join="inner", axis=1, method="pad")
|
| 87 |
+
tm.assert_index_equal(bf.columns, other.columns)
|
| 88 |
+
|
| 89 |
+
af, bf = float_frame.align(
|
| 90 |
+
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None
|
| 91 |
+
)
|
| 92 |
+
tm.assert_index_equal(bf.index, Index([]))
|
| 93 |
+
|
| 94 |
+
af, bf = float_frame.align(
|
| 95 |
+
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
|
| 96 |
+
)
|
| 97 |
+
tm.assert_index_equal(bf.index, Index([]))
|
| 98 |
+
|
| 99 |
+
# Try to align DataFrame to Series along bad axis
|
| 100 |
+
msg = "No axis named 2 for object type DataFrame"
|
| 101 |
+
with pytest.raises(ValueError, match=msg):
|
| 102 |
+
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
|
| 103 |
+
|
| 104 |
+
# align dataframe to series with broadcast or not
|
| 105 |
+
idx = float_frame.index
|
| 106 |
+
s = Series(range(len(idx)), index=idx)
|
| 107 |
+
|
| 108 |
+
left, right = float_frame.align(s, axis=0)
|
| 109 |
+
tm.assert_index_equal(left.index, float_frame.index)
|
| 110 |
+
tm.assert_index_equal(right.index, float_frame.index)
|
| 111 |
+
assert isinstance(right, Series)
|
| 112 |
+
|
| 113 |
+
left, right = float_frame.align(s, broadcast_axis=1)
|
| 114 |
+
tm.assert_index_equal(left.index, float_frame.index)
|
| 115 |
+
expected = {c: s for c in float_frame.columns}
|
| 116 |
+
expected = DataFrame(
|
| 117 |
+
expected, index=float_frame.index, columns=float_frame.columns
|
| 118 |
+
)
|
| 119 |
+
tm.assert_frame_equal(right, expected)
|
| 120 |
+
|
| 121 |
+
# see gh-9558
|
| 122 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
| 123 |
+
result = df[df["a"] == 2]
|
| 124 |
+
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
|
| 125 |
+
tm.assert_frame_equal(result, expected)
|
| 126 |
+
|
| 127 |
+
result = df.where(df["a"] == 2, 0)
|
| 128 |
+
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
|
| 129 |
+
tm.assert_frame_equal(result, expected)
|
| 130 |
+
|
| 131 |
+
def test_align_int(self, int_frame):
|
| 132 |
+
# test other non-float types
|
| 133 |
+
other = DataFrame(index=range(5), columns=["A", "B", "C"])
|
| 134 |
+
|
| 135 |
+
af, bf = int_frame.align(other, join="inner", axis=1, method="pad")
|
| 136 |
+
tm.assert_index_equal(bf.columns, other.columns)
|
| 137 |
+
|
| 138 |
+
def test_align_mixed_type(self, float_string_frame):
|
| 139 |
+
af, bf = float_string_frame.align(
|
| 140 |
+
float_string_frame, join="inner", axis=1, method="pad"
|
| 141 |
+
)
|
| 142 |
+
tm.assert_index_equal(bf.columns, float_string_frame.columns)
|
| 143 |
+
|
| 144 |
+
def test_align_mixed_float(self, mixed_float_frame):
|
| 145 |
+
# mixed floats/ints
|
| 146 |
+
other = DataFrame(index=range(5), columns=["A", "B", "C"])
|
| 147 |
+
|
| 148 |
+
af, bf = mixed_float_frame.align(
|
| 149 |
+
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
|
| 150 |
+
)
|
| 151 |
+
tm.assert_index_equal(bf.index, Index([]))
|
| 152 |
+
|
| 153 |
+
def test_align_mixed_int(self, mixed_int_frame):
|
| 154 |
+
other = DataFrame(index=range(5), columns=["A", "B", "C"])
|
| 155 |
+
|
| 156 |
+
af, bf = mixed_int_frame.align(
|
| 157 |
+
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
|
| 158 |
+
)
|
| 159 |
+
tm.assert_index_equal(bf.index, Index([]))
|
| 160 |
+
|
| 161 |
+
@pytest.mark.parametrize(
|
| 162 |
+
"l_ordered,r_ordered,expected",
|
| 163 |
+
[
|
| 164 |
+
[True, True, pd.CategoricalIndex],
|
| 165 |
+
[True, False, Index],
|
| 166 |
+
[False, True, Index],
|
| 167 |
+
[False, False, pd.CategoricalIndex],
|
| 168 |
+
],
|
| 169 |
+
)
|
| 170 |
+
def test_align_categorical(self, l_ordered, r_ordered, expected):
|
| 171 |
+
# GH-28397
|
| 172 |
+
df_1 = DataFrame(
|
| 173 |
+
{
|
| 174 |
+
"A": np.arange(6, dtype="int64"),
|
| 175 |
+
"B": Series(list("aabbca")).astype(
|
| 176 |
+
pd.CategoricalDtype(list("cab"), ordered=l_ordered)
|
| 177 |
+
),
|
| 178 |
+
}
|
| 179 |
+
).set_index("B")
|
| 180 |
+
df_2 = DataFrame(
|
| 181 |
+
{
|
| 182 |
+
"A": np.arange(5, dtype="int64"),
|
| 183 |
+
"B": Series(list("babca")).astype(
|
| 184 |
+
pd.CategoricalDtype(list("cab"), ordered=r_ordered)
|
| 185 |
+
),
|
| 186 |
+
}
|
| 187 |
+
).set_index("B")
|
| 188 |
+
|
| 189 |
+
aligned_1, aligned_2 = df_1.align(df_2)
|
| 190 |
+
assert isinstance(aligned_1.index, expected)
|
| 191 |
+
assert isinstance(aligned_2.index, expected)
|
| 192 |
+
tm.assert_index_equal(aligned_1.index, aligned_2.index)
|
| 193 |
+
|
| 194 |
+
def test_align_multiindex(self):
|
| 195 |
+
# GH#10665
|
| 196 |
+
# same test cases as test_align_multiindex in test_series.py
|
| 197 |
+
|
| 198 |
+
midx = pd.MultiIndex.from_product(
|
| 199 |
+
[range(2), range(3), range(2)], names=("a", "b", "c")
|
| 200 |
+
)
|
| 201 |
+
idx = Index(range(2), name="b")
|
| 202 |
+
df1 = DataFrame(np.arange(12, dtype="int64"), index=midx)
|
| 203 |
+
df2 = DataFrame(np.arange(2, dtype="int64"), index=idx)
|
| 204 |
+
|
| 205 |
+
# these must be the same results (but flipped)
|
| 206 |
+
res1l, res1r = df1.align(df2, join="left")
|
| 207 |
+
res2l, res2r = df2.align(df1, join="right")
|
| 208 |
+
|
| 209 |
+
expl = df1
|
| 210 |
+
tm.assert_frame_equal(expl, res1l)
|
| 211 |
+
tm.assert_frame_equal(expl, res2r)
|
| 212 |
+
expr = DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
|
| 213 |
+
tm.assert_frame_equal(expr, res1r)
|
| 214 |
+
tm.assert_frame_equal(expr, res2l)
|
| 215 |
+
|
| 216 |
+
res1l, res1r = df1.align(df2, join="right")
|
| 217 |
+
res2l, res2r = df2.align(df1, join="left")
|
| 218 |
+
|
| 219 |
+
exp_idx = pd.MultiIndex.from_product(
|
| 220 |
+
[range(2), range(2), range(2)], names=("a", "b", "c")
|
| 221 |
+
)
|
| 222 |
+
expl = DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
|
| 223 |
+
tm.assert_frame_equal(expl, res1l)
|
| 224 |
+
tm.assert_frame_equal(expl, res2r)
|
| 225 |
+
expr = DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
|
| 226 |
+
tm.assert_frame_equal(expr, res1r)
|
| 227 |
+
tm.assert_frame_equal(expr, res2l)
|
| 228 |
+
|
| 229 |
+
def test_align_series_combinations(self):
|
| 230 |
+
df = DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
|
| 231 |
+
s = Series([1, 2, 4], index=list("ABD"), name="x")
|
| 232 |
+
|
| 233 |
+
# frame + series
|
| 234 |
+
res1, res2 = df.align(s, axis=0)
|
| 235 |
+
exp1 = DataFrame(
|
| 236 |
+
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
|
| 237 |
+
index=list("ABCDE"),
|
| 238 |
+
)
|
| 239 |
+
exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
|
| 240 |
+
|
| 241 |
+
tm.assert_frame_equal(res1, exp1)
|
| 242 |
+
tm.assert_series_equal(res2, exp2)
|
| 243 |
+
|
| 244 |
+
# series + frame
|
| 245 |
+
res1, res2 = s.align(df)
|
| 246 |
+
tm.assert_series_equal(res1, exp2)
|
| 247 |
+
tm.assert_frame_equal(res2, exp1)
|
| 248 |
+
|
| 249 |
+
def test_multiindex_align_to_series_with_common_index_level(self):
|
| 250 |
+
# GH-46001
|
| 251 |
+
foo_index = Index([1, 2, 3], name="foo")
|
| 252 |
+
bar_index = Index([1, 2], name="bar")
|
| 253 |
+
|
| 254 |
+
series = Series([1, 2], index=bar_index, name="foo_series")
|
| 255 |
+
df = DataFrame(
|
| 256 |
+
{"col": np.arange(6)},
|
| 257 |
+
index=pd.MultiIndex.from_product([foo_index, bar_index]),
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
expected_r = Series([1, 2] * 3, index=df.index, name="foo_series")
|
| 261 |
+
result_l, result_r = df.align(series, axis=0)
|
| 262 |
+
|
| 263 |
+
tm.assert_frame_equal(result_l, df)
|
| 264 |
+
tm.assert_series_equal(result_r, expected_r)
|
| 265 |
+
|
| 266 |
+
def test_multiindex_align_to_series_with_common_index_level_missing_in_left(self):
|
| 267 |
+
# GH-46001
|
| 268 |
+
foo_index = Index([1, 2, 3], name="foo")
|
| 269 |
+
bar_index = Index([1, 2], name="bar")
|
| 270 |
+
|
| 271 |
+
series = Series(
|
| 272 |
+
[1, 2, 3, 4], index=Index([1, 2, 3, 4], name="bar"), name="foo_series"
|
| 273 |
+
)
|
| 274 |
+
df = DataFrame(
|
| 275 |
+
{"col": np.arange(6)},
|
| 276 |
+
index=pd.MultiIndex.from_product([foo_index, bar_index]),
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
expected_r = Series([1, 2] * 3, index=df.index, name="foo_series")
|
| 280 |
+
result_l, result_r = df.align(series, axis=0)
|
| 281 |
+
|
| 282 |
+
tm.assert_frame_equal(result_l, df)
|
| 283 |
+
tm.assert_series_equal(result_r, expected_r)
|
| 284 |
+
|
| 285 |
+
def test_multiindex_align_to_series_with_common_index_level_missing_in_right(self):
|
| 286 |
+
# GH-46001
|
| 287 |
+
foo_index = Index([1, 2, 3], name="foo")
|
| 288 |
+
bar_index = Index([1, 2, 3, 4], name="bar")
|
| 289 |
+
|
| 290 |
+
series = Series([1, 2], index=Index([1, 2], name="bar"), name="foo_series")
|
| 291 |
+
df = DataFrame(
|
| 292 |
+
{"col": np.arange(12)},
|
| 293 |
+
index=pd.MultiIndex.from_product([foo_index, bar_index]),
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
expected_r = Series(
|
| 297 |
+
[1, 2, np.nan, np.nan] * 3, index=df.index, name="foo_series"
|
| 298 |
+
)
|
| 299 |
+
result_l, result_r = df.align(series, axis=0)
|
| 300 |
+
|
| 301 |
+
tm.assert_frame_equal(result_l, df)
|
| 302 |
+
tm.assert_series_equal(result_r, expected_r)
|
| 303 |
+
|
| 304 |
+
def test_multiindex_align_to_series_with_common_index_level_missing_in_both(self):
|
| 305 |
+
# GH-46001
|
| 306 |
+
foo_index = Index([1, 2, 3], name="foo")
|
| 307 |
+
bar_index = Index([1, 3, 4], name="bar")
|
| 308 |
+
|
| 309 |
+
series = Series(
|
| 310 |
+
[1, 2, 3], index=Index([1, 2, 4], name="bar"), name="foo_series"
|
| 311 |
+
)
|
| 312 |
+
df = DataFrame(
|
| 313 |
+
{"col": np.arange(9)},
|
| 314 |
+
index=pd.MultiIndex.from_product([foo_index, bar_index]),
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
expected_r = Series([1, np.nan, 3] * 3, index=df.index, name="foo_series")
|
| 318 |
+
result_l, result_r = df.align(series, axis=0)
|
| 319 |
+
|
| 320 |
+
tm.assert_frame_equal(result_l, df)
|
| 321 |
+
tm.assert_series_equal(result_r, expected_r)
|
| 322 |
+
|
| 323 |
+
def test_multiindex_align_to_series_with_common_index_level_non_unique_cols(self):
|
| 324 |
+
# GH-46001
|
| 325 |
+
foo_index = Index([1, 2, 3], name="foo")
|
| 326 |
+
bar_index = Index([1, 2], name="bar")
|
| 327 |
+
|
| 328 |
+
series = Series([1, 2], index=bar_index, name="foo_series")
|
| 329 |
+
df = DataFrame(
|
| 330 |
+
np.arange(18).reshape(6, 3),
|
| 331 |
+
index=pd.MultiIndex.from_product([foo_index, bar_index]),
|
| 332 |
+
)
|
| 333 |
+
df.columns = ["cfoo", "cbar", "cfoo"]
|
| 334 |
+
|
| 335 |
+
expected = Series([1, 2] * 3, index=df.index, name="foo_series")
|
| 336 |
+
result_left, result_right = df.align(series, axis=0)
|
| 337 |
+
|
| 338 |
+
tm.assert_series_equal(result_right, expected)
|
| 339 |
+
tm.assert_index_equal(result_left.columns, df.columns)
|
| 340 |
+
|
| 341 |
+
def test_missing_axis_specification_exception(self):
|
| 342 |
+
df = DataFrame(np.arange(50).reshape((10, 5)))
|
| 343 |
+
series = Series(np.arange(5))
|
| 344 |
+
|
| 345 |
+
with pytest.raises(ValueError, match=r"axis=0 or 1"):
|
| 346 |
+
df.align(series)
|
| 347 |
+
|
| 348 |
+
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
|
| 349 |
+
aa, ab = a.align(
|
| 350 |
+
b, axis=axis, join=how, method=method, limit=limit, fill_axis=fill_axis
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
join_index, join_columns = None, None
|
| 354 |
+
|
| 355 |
+
ea, eb = a, b
|
| 356 |
+
if axis is None or axis == 0:
|
| 357 |
+
join_index = a.index.join(b.index, how=how)
|
| 358 |
+
ea = ea.reindex(index=join_index)
|
| 359 |
+
eb = eb.reindex(index=join_index)
|
| 360 |
+
|
| 361 |
+
if axis is None or axis == 1:
|
| 362 |
+
join_columns = a.columns.join(b.columns, how=how)
|
| 363 |
+
ea = ea.reindex(columns=join_columns)
|
| 364 |
+
eb = eb.reindex(columns=join_columns)
|
| 365 |
+
|
| 366 |
+
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
|
| 367 |
+
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
|
| 368 |
+
|
| 369 |
+
tm.assert_frame_equal(aa, ea)
|
| 370 |
+
tm.assert_frame_equal(ab, eb)
|
| 371 |
+
|
| 372 |
+
@pytest.mark.parametrize("meth", ["pad", "bfill"])
|
| 373 |
+
@pytest.mark.parametrize("ax", [0, 1, None])
|
| 374 |
+
@pytest.mark.parametrize("fax", [0, 1])
|
| 375 |
+
@pytest.mark.parametrize("how", ["inner", "outer", "left", "right"])
|
| 376 |
+
def test_align_fill_method(self, how, meth, ax, fax, float_frame):
|
| 377 |
+
df = float_frame
|
| 378 |
+
self._check_align_fill(df, how, meth, ax, fax)
|
| 379 |
+
|
| 380 |
+
def _check_align_fill(self, frame, kind, meth, ax, fax):
|
| 381 |
+
left = frame.iloc[0:4, :10]
|
| 382 |
+
right = frame.iloc[2:, 6:]
|
| 383 |
+
empty = frame.iloc[:0, :0]
|
| 384 |
+
|
| 385 |
+
self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth)
|
| 386 |
+
self._check_align(
|
| 387 |
+
left, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
|
| 388 |
+
)
|
| 389 |
+
|
| 390 |
+
# empty left
|
| 391 |
+
self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth)
|
| 392 |
+
self._check_align(
|
| 393 |
+
empty, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
# empty right
|
| 397 |
+
self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth)
|
| 398 |
+
self._check_align(
|
| 399 |
+
left, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
# both empty
|
| 403 |
+
self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth)
|
| 404 |
+
self._check_align(
|
| 405 |
+
empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
|
| 406 |
+
)
|
| 407 |
+
|
| 408 |
+
def test_align_series_check_copy(self):
|
| 409 |
+
# GH#
|
| 410 |
+
df = DataFrame({0: [1, 2]})
|
| 411 |
+
ser = Series([1], name=0)
|
| 412 |
+
expected = ser.copy()
|
| 413 |
+
result, other = df.align(ser, axis=1)
|
| 414 |
+
ser.iloc[0] = 100
|
| 415 |
+
tm.assert_series_equal(other, expected)
|
| 416 |
+
|
| 417 |
+
def test_align_identical_different_object(self):
|
| 418 |
+
# GH#51032
|
| 419 |
+
df = DataFrame({"a": [1, 2]})
|
| 420 |
+
ser = Series([3, 4])
|
| 421 |
+
result, result2 = df.align(ser, axis=0)
|
| 422 |
+
tm.assert_frame_equal(result, df)
|
| 423 |
+
tm.assert_series_equal(result2, ser)
|
| 424 |
+
assert df is not result
|
| 425 |
+
assert ser is not result2
|
| 426 |
+
|
| 427 |
+
def test_align_identical_different_object_columns(self):
|
| 428 |
+
# GH#51032
|
| 429 |
+
df = DataFrame({"a": [1, 2]})
|
| 430 |
+
ser = Series([1], index=["a"])
|
| 431 |
+
result, result2 = df.align(ser, axis=1)
|
| 432 |
+
tm.assert_frame_equal(result, df)
|
| 433 |
+
tm.assert_series_equal(result2, ser)
|
| 434 |
+
assert df is not result
|
| 435 |
+
assert ser is not result2
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_asfreq.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
DatetimeIndex,
|
| 9 |
+
Series,
|
| 10 |
+
date_range,
|
| 11 |
+
period_range,
|
| 12 |
+
to_datetime,
|
| 13 |
+
)
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
|
| 16 |
+
from pandas.tseries import offsets
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestAsFreq:
    """Tests for DataFrame/Series.asfreq conversion between frequencies."""

    @pytest.fixture(params=["s", "ms", "us", "ns"])
    def unit(self, request):
        # Datetime resolution unit consumed by test_asfreq_after_normalize.
        return request.param

    def test_asfreq2(self, frame_or_series):
        # Round-tripping business-day <-> business-month-end should be lossless.
        ts = frame_or_series(
            [0.0, 1.0, 2.0],
            index=DatetimeIndex(
                [
                    datetime(2009, 10, 30),
                    datetime(2009, 11, 30),
                    datetime(2009, 12, 31),
                ],
                freq="BM",
            ),
        )

        daily_ts = ts.asfreq("B")
        monthly_ts = daily_ts.asfreq("BM")
        tm.assert_equal(monthly_ts, ts)

        daily_ts = ts.asfreq("B", method="pad")
        monthly_ts = daily_ts.asfreq("BM")
        tm.assert_equal(monthly_ts, ts)

        # Offset objects behave like their string aliases.
        daily_ts = ts.asfreq(offsets.BDay())
        monthly_ts = daily_ts.asfreq(offsets.BMonthEnd())
        tm.assert_equal(monthly_ts, ts)

        # Empty input: returns an empty object, never the original.
        result = ts[:0].asfreq("M")
        assert len(result) == 0
        assert result is not ts

        if frame_or_series is Series:
            daily_ts = ts.asfreq("D", fill_value=-1)
            result = daily_ts.value_counts().sort_index()
            expected = Series(
                [60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0], name="count"
            ).sort_index()
            tm.assert_series_equal(result, expected)

    def test_asfreq_datetimeindex_empty(self, frame_or_series):
        # GH#14320: single-row objects agree on the resulting index.
        index = DatetimeIndex(["2016-09-29 11:00"])
        expected = frame_or_series(index=index, dtype=object).asfreq("H")
        result = frame_or_series([3], index=index.copy()).asfreq("H")
        tm.assert_index_equal(expected.index, result.index)

    @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
    def test_tz_aware_asfreq_smoke(self, tz, frame_or_series):
        # Smoke test: asfreq must not raise on a tz-aware index.
        dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz)
        obj = frame_or_series(np.random.randn(len(dr)), index=dr)
        obj.asfreq("T")  # it works!

    def test_asfreq_normalize(self, frame_or_series):
        # normalize=True should snap timestamps to midnight.
        rng = date_range("1/1/2000 09:30", periods=20)
        norm = date_range("1/1/2000", periods=20)

        vals = np.random.randn(20, 3)

        obj = DataFrame(vals, index=rng)
        expected = DataFrame(vals, index=norm)
        if frame_or_series is Series:
            obj = obj[0]
            expected = expected[0]

        result = obj.asfreq("D", normalize=True)
        tm.assert_equal(result, expected)

    def test_asfreq_keep_index_name(self, frame_or_series):
        # GH#9854: the index name must survive asfreq.
        index_name = "bar"
        index = date_range("20130101", periods=20, name=index_name)
        obj = DataFrame(list(range(20)), columns=["foo"], index=index)
        obj = tm.get_obj(obj, frame_or_series)

        assert index_name == obj.index.name
        assert index_name == obj.asfreq("10D").index.name

    def test_asfreq_ts(self, frame_or_series):
        # PeriodIndex conversion honors how="start"/"end" without dropping rows.
        index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
        obj = DataFrame(np.random.randn(len(index), 3), index=index)
        obj = tm.get_obj(obj, frame_or_series)

        result = obj.asfreq("D", how="end")
        exp_index = index.asfreq("D", how="end")
        assert len(result) == len(obj)
        tm.assert_index_equal(result.index, exp_index)

        result = obj.asfreq("D", how="start")
        exp_index = index.asfreq("D", how="start")
        assert len(result) == len(obj)
        tm.assert_index_equal(result.index, exp_index)

    def test_asfreq_resample_set_correct_freq(self, frame_or_series):
        # GH#5613: .asfreq() and .resample() must set index.freq correctly.
        dti = to_datetime(["2012-01-01", "2012-01-02", "2012-01-03"])
        obj = DataFrame({"col": [1, 2, 3]}, index=dti)
        obj = tm.get_obj(obj, frame_or_series)

        # Settings before calling .asfreq() / .resample().
        assert obj.index.freq is None
        assert obj.index.inferred_freq == "D"

        # does .asfreq() set .freq correctly?
        assert obj.asfreq("D").index.freq == "D"

        # does .resample() set .freq correctly?
        assert obj.resample("D").asfreq().index.freq == "D"

    def test_asfreq_empty(self, datetime_frame):
        # Does not blow up on a length-0 DataFrame; result is a new object.
        zero_length = datetime_frame.reindex([])
        result = zero_length.asfreq("BM")
        assert result is not zero_length

    def test_asfreq(self, datetime_frame):
        # String alias and offset object produce identical results.
        offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
        rule_monthly = datetime_frame.asfreq("BM")

        tm.assert_frame_equal(offset_monthly, rule_monthly)

        filled = rule_monthly.asfreq("B", method="pad")  # noqa
        # TODO: actually check that this worked.

        # don't forget!
        filled_dep = rule_monthly.asfreq("B", method="pad")  # noqa

    def test_asfreq_datetimeindex(self):
        # asfreq on a plain datetime index yields a DatetimeIndex.
        df = DataFrame(
            {"A": [1, 2, 3]},
            index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
        )
        df = df.asfreq("B")
        assert isinstance(df.index, DatetimeIndex)

        ts = df["A"].asfreq("B")
        assert isinstance(ts.index, DatetimeIndex)

    def test_asfreq_fillvalue(self):
        # fill_value applies only to holes created by upsampling (GH issue 3715);
        # pre-existing NaNs must be preserved.
        rng = date_range("1/1/2016", periods=10, freq="2S")
        # Explicit cast to 'float' to avoid implicit cast when setting None.
        ts = Series(np.arange(len(rng)), index=rng, dtype="float")
        df = DataFrame({"one": ts})

        # insert pre-existing missing value
        df.loc["2016-01-01 00:00:08", "one"] = None

        actual_df = df.asfreq(freq="1S", fill_value=9.0)
        expected_df = df.asfreq(freq="1S").fillna(9.0)
        expected_df.loc["2016-01-01 00:00:08", "one"] = None
        tm.assert_frame_equal(expected_df, actual_df)

        expected_series = ts.asfreq(freq="1S").fillna(9.0)
        actual_series = ts.asfreq(freq="1S", fill_value=9.0)
        tm.assert_series_equal(expected_series, actual_series)

    def test_asfreq_with_date_object_index(self, frame_or_series):
        # A datetime.date index is coerced and behaves like a Timestamp index.
        rng = date_range("1/1/2000", periods=20)
        ts = frame_or_series(np.random.randn(20), index=rng)

        ts2 = ts.copy()
        ts2.index = [x.date() for x in ts2.index]

        result = ts2.asfreq("4H", method="ffill")
        expected = ts.asfreq("4H", method="ffill")
        tm.assert_equal(result, expected)

    def test_asfreq_with_unsorted_index(self, frame_or_series):
        # GH#39805: rows are not dropped when the datetime index is out of order.
        index = to_datetime(["2021-01-04", "2021-01-02", "2021-01-03", "2021-01-01"])
        result = frame_or_series(range(4), index=index)

        expected = result.reindex(sorted(index))
        expected.index = expected.index._with_freq("infer")

        result = result.asfreq("D")
        tm.assert_equal(result, expected)

    def test_asfreq_after_normalize(self, unit):
        # https://github.com/pandas-dev/pandas/issues/50727
        result = DatetimeIndex(
            date_range("2000", periods=2).as_unit(unit).normalize(), freq="D"
        )
        expected = DatetimeIndex(["2000-01-01", "2000-01-02"], freq="D").as_unit(unit)
        tm.assert_index_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_asof.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas._libs.tslibs import IncompatibleFrequency
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
Period,
|
| 9 |
+
Series,
|
| 10 |
+
Timestamp,
|
| 11 |
+
date_range,
|
| 12 |
+
period_range,
|
| 13 |
+
to_datetime,
|
| 14 |
+
)
|
| 15 |
+
import pandas._testing as tm
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@pytest.fixture
def date_range_frame():
    """
    Fixture for DataFrame of ints with date_range index

    Columns are ['A', 'B'].
    """
    # 50 rows at a 53-second frequency starting 1990-01-01.
    size = 50
    idx = date_range("1/1/1990", periods=size, freq="53s")
    values = np.arange(size)
    return DataFrame({"A": values, "B": values}, index=idx)
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class TestFrameAsof:
    """Tests for DataFrame.asof (last valid row at or before each label)."""

    def test_basic(self, date_range_frame):
        # Explicitly cast to float to avoid implicit cast when setting np.nan.
        df = date_range_frame.astype({"A": "float"})
        N = 50
        df.loc[df.index[15:30], "A"] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        result = df.asof(dates)
        assert result.notna().all(1).all()
        lb = df.index[14]
        ub = df.index[30]

        # A plain list of timestamps works the same as a DatetimeIndex.
        dates = list(dates)

        result = df.asof(dates)
        assert result.notna().all(1).all()

        # Within the NaN stretch, asof falls back to the last valid row (14).
        mask = (result.index >= lb) & (result.index < ub)
        rs = result[mask]
        assert (rs == 14).all(1).all()

    def test_subset(self, date_range_frame):
        N = 10
        # Explicitly cast to float to avoid implicit upcast when setting np.nan.
        df = date_range_frame.iloc[:N].copy().astype({"A": "float"})
        df.loc[df.index[4:8], "A"] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        # with a subset of A should be the same
        result = df.asof(dates, subset="A")
        expected = df.asof(dates)
        tm.assert_frame_equal(result, expected)

        # same with A/B
        result = df.asof(dates, subset=["A", "B"])
        expected = df.asof(dates)
        tm.assert_frame_equal(result, expected)

        # B gives df.asof
        result = df.asof(dates, subset="B")
        expected = df.resample("25s", closed="right").ffill().reindex(dates)
        expected.iloc[20:] = 9
        # no "missing", so "B" can retain int dtype (df["A"].dtype platform-dependent)
        expected["B"] = expected["B"].astype(df["B"].dtype)

        tm.assert_frame_equal(result, expected)

    def test_missing(self, date_range_frame):
        # GH 15118: no match found — `where` value before earliest index date.
        N = 10
        df = date_range_frame.iloc[:N].copy()

        result = df.asof("1989-12-31")

        expected = Series(
            index=["A", "B"], name=Timestamp("1989-12-31"), dtype=np.float64
        )
        tm.assert_series_equal(result, expected)

        result = df.asof(to_datetime(["1989-12-31"]))
        expected = DataFrame(
            index=to_datetime(["1989-12-31"]), columns=["A", "B"], dtype="float64"
        )
        tm.assert_frame_equal(result, expected)

        # Check that we handle PeriodIndex correctly, dont end up with
        # period.ordinal for series name
        df = df.to_period("D")
        result = df.asof("1989-12-31")
        assert isinstance(result.name, Period)

    def test_asof_all_nans(self, frame_or_series):
        # GH 15713: object that is entirely NaN.
        result = frame_or_series([np.nan]).asof([0])
        expected = frame_or_series([np.nan])
        tm.assert_equal(result, expected)

    def test_all_nans(self, date_range_frame):
        # GH 15713: DataFrame is all NaNs.

        # non-default indexes, multiple inputs
        N = 150
        rng = date_range_frame.index
        dates = date_range("1/1/1990", periods=N, freq="25s")
        result = DataFrame(np.nan, index=rng, columns=["A"]).asof(dates)
        expected = DataFrame(np.nan, index=dates, columns=["A"])
        tm.assert_frame_equal(result, expected)

        # multiple columns
        dates = date_range("1/1/1990", periods=N, freq="25s")
        result = DataFrame(np.nan, index=rng, columns=["A", "B", "C"]).asof(dates)
        expected = DataFrame(np.nan, index=dates, columns=["A", "B", "C"])
        tm.assert_frame_equal(result, expected)

        # scalar-like input
        result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof([3])
        expected = DataFrame(np.nan, index=[3], columns=["A", "B"])
        tm.assert_frame_equal(result, expected)

        result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof(3)
        expected = Series(np.nan, index=["A", "B"], name=3)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "stamp,expected",
        [
            (
                Timestamp("2018-01-01 23:22:43.325+00:00"),
                Series(2, name=Timestamp("2018-01-01 23:22:43.325+00:00")),
            ),
            (
                Timestamp("2018-01-01 22:33:20.682+01:00"),
                Series(1, name=Timestamp("2018-01-01 22:33:20.682+01:00")),
            ),
        ],
    )
    def test_time_zone_aware_index(self, stamp, expected):
        # GH21194: tz-aware index comparisons respect UTC offsets.
        df = DataFrame(
            data=[1, 2],
            index=[
                Timestamp("2018-01-01 21:00:05.001+00:00"),
                Timestamp("2018-01-01 22:35:10.550+00:00"),
            ],
        )

        result = df.asof(stamp)
        tm.assert_series_equal(result, expected)

    def test_is_copy(self, date_range_frame):
        # GH-27357, GH-30784: asof returns an actual copy that does not track
        # the parent dataframe (no SettingWithCopy warnings).
        df = date_range_frame.astype({"A": "float"})
        N = 50
        df.loc[df.index[15:30], "A"] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        result = df.asof(dates)

        with tm.assert_produces_warning(None):
            result["C"] = 1

    def test_asof_periodindex_mismatched_freq(self):
        N = 50
        rng = period_range("1/1/1990", periods=N, freq="H")
        df = DataFrame(np.random.randn(N), index=rng)

        # Mismatched freq must raise, not silently coerce.
        msg = "Input has different freq"
        with pytest.raises(IncompatibleFrequency, match=msg):
            df.asof(rng.asfreq("D"))

    def test_asof_preserves_bool_dtype(self):
        # GH#16063 was casting bools to floats
        dti = date_range("2017-01-01", freq="MS", periods=4)
        ser = Series([True, False, True], index=dti[:-1])

        ts = dti[-1]
        res = ser.asof([ts])

        expected = Series([True], index=[ts])
        tm.assert_series_equal(res, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_assign.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import DataFrame
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestAssign:
|
| 8 |
+
def test_assign(self):
|
| 9 |
+
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
|
| 10 |
+
original = df.copy()
|
| 11 |
+
result = df.assign(C=df.B / df.A)
|
| 12 |
+
expected = df.copy()
|
| 13 |
+
expected["C"] = [4, 2.5, 2]
|
| 14 |
+
tm.assert_frame_equal(result, expected)
|
| 15 |
+
|
| 16 |
+
# lambda syntax
|
| 17 |
+
result = df.assign(C=lambda x: x.B / x.A)
|
| 18 |
+
tm.assert_frame_equal(result, expected)
|
| 19 |
+
|
| 20 |
+
# original is unmodified
|
| 21 |
+
tm.assert_frame_equal(df, original)
|
| 22 |
+
|
| 23 |
+
# Non-Series array-like
|
| 24 |
+
result = df.assign(C=[4, 2.5, 2])
|
| 25 |
+
tm.assert_frame_equal(result, expected)
|
| 26 |
+
# original is unmodified
|
| 27 |
+
tm.assert_frame_equal(df, original)
|
| 28 |
+
|
| 29 |
+
result = df.assign(B=df.B / df.A)
|
| 30 |
+
expected = expected.drop("B", axis=1).rename(columns={"C": "B"})
|
| 31 |
+
tm.assert_frame_equal(result, expected)
|
| 32 |
+
|
| 33 |
+
# overwrite
|
| 34 |
+
result = df.assign(A=df.A + df.B)
|
| 35 |
+
expected = df.copy()
|
| 36 |
+
expected["A"] = [5, 7, 9]
|
| 37 |
+
tm.assert_frame_equal(result, expected)
|
| 38 |
+
|
| 39 |
+
# lambda
|
| 40 |
+
result = df.assign(A=lambda x: x.A + x.B)
|
| 41 |
+
tm.assert_frame_equal(result, expected)
|
| 42 |
+
|
| 43 |
+
def test_assign_multiple(self):
|
| 44 |
+
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=["A", "B"])
|
| 45 |
+
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
|
| 46 |
+
expected = DataFrame(
|
| 47 |
+
[[1, 4, 7, 1, 4], [2, 5, 8, 2, 5], [3, 6, 9, 3, 6]], columns=list("ABCDE")
|
| 48 |
+
)
|
| 49 |
+
tm.assert_frame_equal(result, expected)
|
| 50 |
+
|
| 51 |
+
def test_assign_order(self):
|
| 52 |
+
# GH 9818
|
| 53 |
+
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
|
| 54 |
+
result = df.assign(D=df.A + df.B, C=df.A - df.B)
|
| 55 |
+
|
| 56 |
+
expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]], columns=list("ABDC"))
|
| 57 |
+
tm.assert_frame_equal(result, expected)
|
| 58 |
+
result = df.assign(C=df.A - df.B, D=df.A + df.B)
|
| 59 |
+
|
| 60 |
+
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]], columns=list("ABCD"))
|
| 61 |
+
|
| 62 |
+
tm.assert_frame_equal(result, expected)
|
| 63 |
+
|
| 64 |
+
def test_assign_bad(self):
|
| 65 |
+
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
|
| 66 |
+
|
| 67 |
+
# non-keyword argument
|
| 68 |
+
msg = r"assign\(\) takes 1 positional argument but 2 were given"
|
| 69 |
+
with pytest.raises(TypeError, match=msg):
|
| 70 |
+
df.assign(lambda x: x.A)
|
| 71 |
+
msg = "'DataFrame' object has no attribute 'C'"
|
| 72 |
+
with pytest.raises(AttributeError, match=msg):
|
| 73 |
+
df.assign(C=df.A, D=df.A + df.C)
|
| 74 |
+
|
| 75 |
+
def test_assign_dependent(self):
|
| 76 |
+
df = DataFrame({"A": [1, 2], "B": [3, 4]})
|
| 77 |
+
|
| 78 |
+
result = df.assign(C=df.A, D=lambda x: x["A"] + x["C"])
|
| 79 |
+
expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], columns=list("ABCD"))
|
| 80 |
+
tm.assert_frame_equal(result, expected)
|
| 81 |
+
|
| 82 |
+
result = df.assign(C=lambda df: df.A, D=lambda df: df["A"] + df["C"])
|
| 83 |
+
expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], columns=list("ABCD"))
|
| 84 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_at_time.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import time
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
import pytz
|
| 6 |
+
|
| 7 |
+
from pandas._libs.tslibs import timezones
|
| 8 |
+
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
date_range,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TestAtTime:
    """Tests for DataFrame/Series.at_time (select rows at a time of day)."""

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_localized_at_time(self, tzstr, frame_or_series):
        tz = timezones.maybe_get_tz(tzstr)

        rng = date_range("4/16/2012", "5/1/2012", freq="H")
        ts = frame_or_series(np.random.randn(len(rng)), index=rng)

        ts_local = ts.tz_localize(tzstr)

        result = ts_local.at_time(time(10, 0))
        expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
        tm.assert_equal(result, expected)
        assert timezones.tz_compare(result.index.tz, tz)

    def test_at_time(self, frame_or_series):
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
        ts = tm.get_obj(ts, frame_or_series)
        rs = ts.at_time(rng[1])
        assert (rs.index.hour == rng[1].hour).all()
        assert (rs.index.minute == rng[1].minute).all()
        assert (rs.index.second == rng[1].second).all()

        # String time spec is equivalent to a datetime.time.
        result = ts.at_time("9:30")
        expected = ts.at_time(time(9, 30))
        tm.assert_equal(result, expected)

    def test_at_time_midnight(self, frame_or_series):
        # midnight, everything
        rng = date_range("1/1/2000", "1/31/2000")
        ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
        ts = tm.get_obj(ts, frame_or_series)

        result = ts.at_time(time(0, 0))
        tm.assert_equal(result, ts)

    def test_at_time_nonexistent(self, frame_or_series):
        # time doesn't exist in the index -> empty result, no error
        rng = date_range("1/1/2012", freq="23Min", periods=384)
        ts = DataFrame(np.random.randn(len(rng)), rng)
        ts = tm.get_obj(ts, frame_or_series)
        rs = ts.at_time("16:00")
        assert len(rs) == 0

    @pytest.mark.parametrize(
        "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
    )
    def test_at_time_errors(self, hour):
        # GH#24043: tz-aware time against a naive index must raise.
        dti = date_range("2018", periods=3, freq="H")
        df = DataFrame(list(range(len(dti))), index=dti)
        if getattr(hour, "tzinfo", None) is None:
            result = df.at_time(hour)
            expected = df.iloc[1:2]
            tm.assert_frame_equal(result, expected)
        else:
            with pytest.raises(ValueError, match="Index must be timezone"):
                df.at_time(hour)

    def test_at_time_tz(self):
        # GH#24043: tz-aware time is converted to the index's zone.
        dti = date_range("2018", periods=3, freq="H", tz="US/Pacific")
        df = DataFrame(list(range(len(dti))), index=dti)
        result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
        expected = df.iloc[1:2]
        tm.assert_frame_equal(result, expected)

    def test_at_time_raises(self, frame_or_series):
        # GH#20725
        obj = DataFrame([[1, 2, 3], [4, 5, 6]])
        obj = tm.get_obj(obj, frame_or_series)
        msg = "Index must be DatetimeIndex"
        with pytest.raises(TypeError, match=msg):  # index is not a DatetimeIndex
            obj.at_time("00:00")

    @pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
    def test_at_time_axis(self, axis):
        # issue 8839: at_time works along either axis.
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = DataFrame(np.random.randn(len(rng), len(rng)))
        ts.index, ts.columns = rng, rng

        indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]

        if axis in ["index", 0]:
            expected = ts.loc[indices, :]
        elif axis in ["columns", 1]:
            expected = ts.loc[:, indices]

        result = ts.at_time("9:30", axis=axis)

        # Without clearing freq, result has freq 1440T and expected 5T
        result.index = result.index._with_freq(None)
        expected.index = expected.index._with_freq(None)
        tm.assert_frame_equal(result, expected)

    def test_at_time_datetimeindex(self):
        index = date_range("2012-01-01", "2012-01-05", freq="30min")
        df = DataFrame(np.random.randn(len(index), 5), index=index)
        akey = time(12, 0, 0)
        # Positions of 12:00 in the 30-minute grid (one per day).
        ainds = [24, 72, 120, 168]

        result = df.at_time(akey)
        expected = df.loc[akey]
        expected2 = df.iloc[ainds]
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result, expected2)
        assert len(result) == 4
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_between_time.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import (
|
| 2 |
+
datetime,
|
| 3 |
+
time,
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
from pandas._libs.tslibs import timezones
|
| 10 |
+
import pandas.util._test_decorators as td
|
| 11 |
+
|
| 12 |
+
from pandas import (
|
| 13 |
+
DataFrame,
|
| 14 |
+
Series,
|
| 15 |
+
date_range,
|
| 16 |
+
)
|
| 17 |
+
import pandas._testing as tm
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class TestBetweenTime:
|
| 21 |
+
@td.skip_if_not_us_locale
|
| 22 |
+
def test_between_time_formats(self, frame_or_series):
|
| 23 |
+
# GH#11818
|
| 24 |
+
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
|
| 25 |
+
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
|
| 26 |
+
ts = tm.get_obj(ts, frame_or_series)
|
| 27 |
+
|
| 28 |
+
strings = [
|
| 29 |
+
("2:00", "2:30"),
|
| 30 |
+
("0200", "0230"),
|
| 31 |
+
("2:00am", "2:30am"),
|
| 32 |
+
("0200am", "0230am"),
|
| 33 |
+
("2:00:00", "2:30:00"),
|
| 34 |
+
("020000", "023000"),
|
| 35 |
+
("2:00:00am", "2:30:00am"),
|
| 36 |
+
("020000am", "023000am"),
|
| 37 |
+
]
|
| 38 |
+
expected_length = 28
|
| 39 |
+
|
| 40 |
+
for time_string in strings:
|
| 41 |
+
assert len(ts.between_time(*time_string)) == expected_length
|
| 42 |
+
|
| 43 |
+
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
|
| 44 |
+
def test_localized_between_time(self, tzstr, frame_or_series):
|
| 45 |
+
tz = timezones.maybe_get_tz(tzstr)
|
| 46 |
+
|
| 47 |
+
rng = date_range("4/16/2012", "5/1/2012", freq="H")
|
| 48 |
+
ts = Series(np.random.randn(len(rng)), index=rng)
|
| 49 |
+
if frame_or_series is DataFrame:
|
| 50 |
+
ts = ts.to_frame()
|
| 51 |
+
|
| 52 |
+
ts_local = ts.tz_localize(tzstr)
|
| 53 |
+
|
| 54 |
+
t1, t2 = time(10, 0), time(11, 0)
|
| 55 |
+
result = ts_local.between_time(t1, t2)
|
| 56 |
+
expected = ts.between_time(t1, t2).tz_localize(tzstr)
|
| 57 |
+
tm.assert_equal(result, expected)
|
| 58 |
+
assert timezones.tz_compare(result.index.tz, tz)
|
| 59 |
+
|
| 60 |
+
def test_between_time_types(self, frame_or_series):
|
| 61 |
+
# GH11818
|
| 62 |
+
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
|
| 63 |
+
obj = DataFrame({"A": 0}, index=rng)
|
| 64 |
+
obj = tm.get_obj(obj, frame_or_series)
|
| 65 |
+
|
| 66 |
+
msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
|
| 67 |
+
with pytest.raises(ValueError, match=msg):
|
| 68 |
+
obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
|
| 69 |
+
|
| 70 |
+
def test_between_time(self, inclusive_endpoints_fixture, frame_or_series):
    """Row counts and endpoint handling for every ``inclusive`` mode, for a
    same-day window and for a window that wraps across midnight."""
    rng = date_range("1/1/2000", "1/5/2000", freq="5min")
    ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
    ts = tm.get_obj(ts, frame_or_series)

    stime = time(0, 0)
    etime = time(1, 0)
    inclusive = inclusive_endpoints_fixture

    filtered = ts.between_time(stime, etime, inclusive=inclusive)
    # 13 rows per 00:00-01:00 window x 4 days, plus the trailing 1/5 00:00.
    exp_len = 13 * 4 + 1

    if inclusive in ["right", "neither"]:
        # Dropping 00:00 removes 4 daily rows plus the trailing endpoint.
        exp_len -= 5
    if inclusive in ["left", "neither"]:
        # Dropping 01:00 removes one row per full day.
        exp_len -= 4

    assert len(filtered) == exp_len
    for rs in filtered.index:
        t = rs.time()
        if inclusive in ["left", "both"]:
            assert t >= stime
        else:
            assert t > stime

        if inclusive in ["right", "both"]:
            assert t <= etime
        else:
            assert t < etime

    # String arguments behave exactly like time objects.
    result = ts.between_time("00:00", "01:00")
    expected = ts.between_time(stime, etime)
    tm.assert_equal(result, expected)

    # across midnight
    rng = date_range("1/1/2000", "1/5/2000", freq="5min")
    ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
    ts = tm.get_obj(ts, frame_or_series)
    stime = time(22, 0)
    etime = time(9, 0)

    filtered = ts.between_time(stime, etime, inclusive=inclusive)
    # 11 hours of 12 rows each plus one endpoint, per night, x 4 nights + 1.
    exp_len = (12 * 11 + 1) * 4 + 1
    if inclusive in ["right", "neither"]:
        exp_len -= 4
    if inclusive in ["left", "neither"]:
        exp_len -= 4

    assert len(filtered) == exp_len
    for rs in filtered.index:
        t = rs.time()
        # When the window wraps midnight, a row matches if it is in EITHER leg.
        if inclusive in ["left", "both"]:
            assert (t >= stime) or (t <= etime)
        else:
            assert (t > stime) or (t <= etime)

        if inclusive in ["right", "both"]:
            assert (t <= etime) or (t >= stime)
        else:
            assert (t < etime) or (t >= stime)
|
| 130 |
+
|
| 131 |
+
def test_between_time_raises(self, frame_or_series):
|
| 132 |
+
# GH#20725
|
| 133 |
+
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
|
| 134 |
+
obj = tm.get_obj(obj, frame_or_series)
|
| 135 |
+
|
| 136 |
+
msg = "Index must be DatetimeIndex"
|
| 137 |
+
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
|
| 138 |
+
obj.between_time(start_time="00:00", end_time="12:00")
|
| 139 |
+
|
| 140 |
+
def test_between_time_axis(self, frame_or_series):
|
| 141 |
+
# GH#8839
|
| 142 |
+
rng = date_range("1/1/2000", periods=100, freq="10min")
|
| 143 |
+
ts = Series(np.random.randn(len(rng)), index=rng)
|
| 144 |
+
if frame_or_series is DataFrame:
|
| 145 |
+
ts = ts.to_frame()
|
| 146 |
+
|
| 147 |
+
stime, etime = ("08:00:00", "09:00:00")
|
| 148 |
+
expected_length = 7
|
| 149 |
+
|
| 150 |
+
assert len(ts.between_time(stime, etime)) == expected_length
|
| 151 |
+
assert len(ts.between_time(stime, etime, axis=0)) == expected_length
|
| 152 |
+
msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}"
|
| 153 |
+
with pytest.raises(ValueError, match=msg):
|
| 154 |
+
ts.between_time(stime, etime, axis=ts.ndim)
|
| 155 |
+
|
| 156 |
+
def test_between_time_axis_aliases(self, axis):
|
| 157 |
+
# GH#8839
|
| 158 |
+
rng = date_range("1/1/2000", periods=100, freq="10min")
|
| 159 |
+
ts = DataFrame(np.random.randn(len(rng), len(rng)))
|
| 160 |
+
stime, etime = ("08:00:00", "09:00:00")
|
| 161 |
+
exp_len = 7
|
| 162 |
+
|
| 163 |
+
if axis in ["index", 0]:
|
| 164 |
+
ts.index = rng
|
| 165 |
+
assert len(ts.between_time(stime, etime)) == exp_len
|
| 166 |
+
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
|
| 167 |
+
|
| 168 |
+
if axis in ["columns", 1]:
|
| 169 |
+
ts.columns = rng
|
| 170 |
+
selected = ts.between_time(stime, etime, axis=1).columns
|
| 171 |
+
assert len(selected) == exp_len
|
| 172 |
+
|
| 173 |
+
def test_between_time_axis_raises(self, axis):
|
| 174 |
+
# issue 8839
|
| 175 |
+
rng = date_range("1/1/2000", periods=100, freq="10min")
|
| 176 |
+
mask = np.arange(0, len(rng))
|
| 177 |
+
rand_data = np.random.randn(len(rng), len(rng))
|
| 178 |
+
ts = DataFrame(rand_data, index=rng, columns=rng)
|
| 179 |
+
stime, etime = ("08:00:00", "09:00:00")
|
| 180 |
+
|
| 181 |
+
msg = "Index must be DatetimeIndex"
|
| 182 |
+
if axis in ["columns", 1]:
|
| 183 |
+
ts.index = mask
|
| 184 |
+
with pytest.raises(TypeError, match=msg):
|
| 185 |
+
ts.between_time(stime, etime)
|
| 186 |
+
with pytest.raises(TypeError, match=msg):
|
| 187 |
+
ts.between_time(stime, etime, axis=0)
|
| 188 |
+
|
| 189 |
+
if axis in ["index", 0]:
|
| 190 |
+
ts.columns = mask
|
| 191 |
+
with pytest.raises(TypeError, match=msg):
|
| 192 |
+
ts.between_time(stime, etime, axis=1)
|
| 193 |
+
|
| 194 |
+
def test_between_time_datetimeindex(self):
|
| 195 |
+
index = date_range("2012-01-01", "2012-01-05", freq="30min")
|
| 196 |
+
df = DataFrame(np.random.randn(len(index), 5), index=index)
|
| 197 |
+
bkey = slice(time(13, 0, 0), time(14, 0, 0))
|
| 198 |
+
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
|
| 199 |
+
|
| 200 |
+
result = df.between_time(bkey.start, bkey.stop)
|
| 201 |
+
expected = df.loc[bkey]
|
| 202 |
+
expected2 = df.iloc[binds]
|
| 203 |
+
tm.assert_frame_equal(result, expected)
|
| 204 |
+
tm.assert_frame_equal(result, expected2)
|
| 205 |
+
assert len(result) == 12
|
| 206 |
+
|
| 207 |
+
def test_between_time_incorrect_arg_inclusive(self):
|
| 208 |
+
# GH40245
|
| 209 |
+
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
|
| 210 |
+
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
|
| 211 |
+
|
| 212 |
+
stime = time(0, 0)
|
| 213 |
+
etime = time(1, 0)
|
| 214 |
+
inclusive = "bad_string"
|
| 215 |
+
msg = "Inclusive has to be either 'both', 'neither', 'left' or 'right'"
|
| 216 |
+
with pytest.raises(ValueError, match=msg):
|
| 217 |
+
ts.between_time(stime, etime, inclusive=inclusive)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_combine.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import pandas._testing as tm
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TestCombine:
|
| 9 |
+
@pytest.mark.parametrize(
|
| 10 |
+
"data",
|
| 11 |
+
[
|
| 12 |
+
pd.date_range("2000", periods=4),
|
| 13 |
+
pd.date_range("2000", periods=4, tz="US/Central"),
|
| 14 |
+
pd.period_range("2000", periods=4),
|
| 15 |
+
pd.timedelta_range(0, periods=4),
|
| 16 |
+
],
|
| 17 |
+
)
|
| 18 |
+
def test_combine_datetlike_udf(self, data):
|
| 19 |
+
# GH#23079
|
| 20 |
+
df = pd.DataFrame({"A": data})
|
| 21 |
+
other = df.copy()
|
| 22 |
+
df.iloc[1, 0] = None
|
| 23 |
+
|
| 24 |
+
def combiner(a, b):
|
| 25 |
+
return b
|
| 26 |
+
|
| 27 |
+
result = df.combine(other, combiner)
|
| 28 |
+
tm.assert_frame_equal(result, other)
|
| 29 |
+
|
| 30 |
+
def test_combine_generic(self, float_frame):
|
| 31 |
+
df1 = float_frame
|
| 32 |
+
df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]]
|
| 33 |
+
|
| 34 |
+
combined = df1.combine(df2, np.add)
|
| 35 |
+
combined2 = df2.combine(df1, np.add)
|
| 36 |
+
assert combined["D"].isna().all()
|
| 37 |
+
assert combined2["D"].isna().all()
|
| 38 |
+
|
| 39 |
+
chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]]
|
| 40 |
+
chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]]
|
| 41 |
+
|
| 42 |
+
exp = (
|
| 43 |
+
float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk)
|
| 44 |
+
* 2
|
| 45 |
+
)
|
| 46 |
+
tm.assert_frame_equal(chunk, exp)
|
| 47 |
+
tm.assert_frame_equal(chunk2, exp)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_combine_first.py
ADDED
|
@@ -0,0 +1,540 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas.core.dtypes.cast import find_common_type
|
| 7 |
+
from pandas.core.dtypes.common import is_dtype_equal
|
| 8 |
+
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from pandas import (
|
| 11 |
+
DataFrame,
|
| 12 |
+
Index,
|
| 13 |
+
MultiIndex,
|
| 14 |
+
Series,
|
| 15 |
+
)
|
| 16 |
+
import pandas._testing as tm
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestDataFrameCombineFirst:
    def test_combine_first_mixed(self):
        """Disjoint mixed-dtype frames stack row-wise without mangling values."""
        left = DataFrame(
            {
                "A": Series(["a", "b"], index=range(2)),
                "B": Series(range(2), index=range(2)),
            }
        )
        right = DataFrame(
            {
                "A": Series(["a", "b"], index=range(5, 7)),
                "B": Series(range(2), index=range(5, 7)),
            }
        )

        expected = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
        tm.assert_frame_equal(left.combine_first(right), expected)
|
| 32 |
+
|
| 33 |
+
def test_combine_first(self, float_frame):
    """End-to-end combine_first behavior: disjoint halves, identical index,
    overlapping ranges, and empty-frame corner cases."""
    # disjoint: recombining the two halves reproduces the original rows
    head, tail = float_frame[:5], float_frame[5:]

    combined = head.combine_first(tail)
    reordered_frame = float_frame.reindex(combined.index)
    tm.assert_frame_equal(combined, reordered_frame)
    assert tm.equalContents(combined.columns, float_frame.columns)
    tm.assert_series_equal(combined["A"], reordered_frame["A"])

    # same index: non-NA values from self win; missing columns come from other
    fcopy = float_frame.copy()
    fcopy["A"] = 1
    del fcopy["C"]

    fcopy2 = float_frame.copy()
    fcopy2["B"] = 0
    del fcopy2["D"]

    combined = fcopy.combine_first(fcopy2)

    assert (combined["A"] == 1).all()
    tm.assert_series_equal(combined["B"], fcopy["B"])
    tm.assert_series_equal(combined["C"], fcopy2["C"])
    tm.assert_series_equal(combined["D"], fcopy["D"])

    # overlap: self's values take precedence on shared rows
    head, tail = reordered_frame[:10].copy(), reordered_frame
    head["A"] = 1

    combined = head.combine_first(tail)
    assert (combined["A"][:10] == 1).all()

    # reverse overlap
    tail.iloc[:10, tail.columns.get_loc("A")] = 0
    combined = tail.combine_first(head)
    assert (combined["A"][:10] == 0).all()

    # no overlap: each side's rows are preserved verbatim
    f = float_frame[:10]
    g = float_frame[10:]
    combined = f.combine_first(g)
    tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
    tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])

    # corner cases: combining with an empty frame is a no-op either way
    comb = float_frame.combine_first(DataFrame())
    tm.assert_frame_equal(comb, float_frame)

    comb = DataFrame().combine_first(float_frame)
    tm.assert_frame_equal(comb, float_frame)

    comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
    assert "faz" in comb.index

    # #2525: a column that exists only (and empty) in other is still added
    df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
    df2 = DataFrame(columns=["b"])
    result = df.combine_first(df2)
    assert "b" in result
|
| 93 |
+
|
| 94 |
+
def test_combine_first_mixed_bug(self):
|
| 95 |
+
idx = Index(["a", "b", "c", "e"])
|
| 96 |
+
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
|
| 97 |
+
ser2 = Series(["a", "b", "c", "e"], index=idx)
|
| 98 |
+
ser3 = Series([12, 4, 5, 97], index=idx)
|
| 99 |
+
|
| 100 |
+
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
|
| 101 |
+
|
| 102 |
+
idx = Index(["a", "b", "c", "f"])
|
| 103 |
+
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
|
| 104 |
+
ser2 = Series(["a", "b", "c", "f"], index=idx)
|
| 105 |
+
ser3 = Series([12, 4, 5, 97], index=idx)
|
| 106 |
+
|
| 107 |
+
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
|
| 108 |
+
|
| 109 |
+
combined = frame1.combine_first(frame2)
|
| 110 |
+
assert len(combined.columns) == 5
|
| 111 |
+
|
| 112 |
+
def test_combine_first_same_as_in_update(self):
|
| 113 |
+
# gh 3016 (same as in update)
|
| 114 |
+
df = DataFrame(
|
| 115 |
+
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
|
| 116 |
+
columns=["A", "B", "bool1", "bool2"],
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
|
| 120 |
+
result = df.combine_first(other)
|
| 121 |
+
tm.assert_frame_equal(result, df)
|
| 122 |
+
|
| 123 |
+
df.loc[0, "A"] = np.nan
|
| 124 |
+
result = df.combine_first(other)
|
| 125 |
+
df.loc[0, "A"] = 45
|
| 126 |
+
tm.assert_frame_equal(result, df)
|
| 127 |
+
|
| 128 |
+
def test_combine_first_doc_example(self):
|
| 129 |
+
# doc example
|
| 130 |
+
df1 = DataFrame(
|
| 131 |
+
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
df2 = DataFrame(
|
| 135 |
+
{
|
| 136 |
+
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
|
| 137 |
+
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
|
| 138 |
+
}
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
result = df1.combine_first(df2)
|
| 142 |
+
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
|
| 143 |
+
tm.assert_frame_equal(result, expected)
|
| 144 |
+
|
| 145 |
+
def test_combine_first_return_obj_type_with_bools(self):
|
| 146 |
+
# GH3552
|
| 147 |
+
|
| 148 |
+
df1 = DataFrame(
|
| 149 |
+
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
|
| 150 |
+
)
|
| 151 |
+
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
|
| 152 |
+
|
| 153 |
+
expected = Series([True, True, False], name=2, dtype=bool)
|
| 154 |
+
|
| 155 |
+
result_12 = df1.combine_first(df2)[2]
|
| 156 |
+
tm.assert_series_equal(result_12, expected)
|
| 157 |
+
|
| 158 |
+
result_21 = df2.combine_first(df1)[2]
|
| 159 |
+
tm.assert_series_equal(result_21, expected)
|
| 160 |
+
|
| 161 |
+
@pytest.mark.parametrize(
|
| 162 |
+
"data1, data2, data_expected",
|
| 163 |
+
(
|
| 164 |
+
(
|
| 165 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 166 |
+
[pd.NaT, pd.NaT, pd.NaT],
|
| 167 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 168 |
+
),
|
| 169 |
+
(
|
| 170 |
+
[pd.NaT, pd.NaT, pd.NaT],
|
| 171 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 172 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 173 |
+
),
|
| 174 |
+
(
|
| 175 |
+
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
|
| 176 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 177 |
+
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 178 |
+
),
|
| 179 |
+
(
|
| 180 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 181 |
+
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
|
| 182 |
+
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
|
| 183 |
+
),
|
| 184 |
+
),
|
| 185 |
+
)
|
| 186 |
+
def test_combine_first_convert_datatime_correctly(
|
| 187 |
+
self, data1, data2, data_expected
|
| 188 |
+
):
|
| 189 |
+
# GH 3593
|
| 190 |
+
|
| 191 |
+
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
|
| 192 |
+
result = df1.combine_first(df2)
|
| 193 |
+
expected = DataFrame({"a": data_expected})
|
| 194 |
+
tm.assert_frame_equal(result, expected)
|
| 195 |
+
|
| 196 |
+
def test_combine_first_align_nan(self):
    """Alignment-driven dtype behavior with a datetime column and a missing
    column; the TODOs record the currently-wrong upcasts."""
    # GH 7509 (not fixed)
    dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
    dfb = DataFrame([[4], [5]], columns=["b"])
    assert dfa["a"].dtype == "datetime64[ns]"
    assert dfa["b"].dtype == "int64"

    res = dfa.combine_first(dfb)
    exp = DataFrame(
        {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
        columns=["a", "b"],
    )
    tm.assert_frame_equal(res, exp)
    assert res["a"].dtype == "datetime64[ns]"
    # TODO: this must be int64
    assert res["b"].dtype == "int64"

    # Combining from an empty slice of dfa: column "a" has no values at all.
    res = dfa.iloc[:0].combine_first(dfb)
    exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
    tm.assert_frame_equal(res, exp)
    # TODO: this must be datetime64
    assert res["a"].dtype == "float64"
    # TODO: this must be int64
    assert res["b"].dtype == "int64"
|
| 220 |
+
|
| 221 |
+
def test_combine_first_timezone(self):
    """tz-aware datetime columns through combine_first (GH#7630, GH#10567)."""
    # see gh-7630
    data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
    df1 = DataFrame(
        columns=["UTCdatetime", "abc"],
        data=data1,
        index=pd.date_range("20140627", periods=1),
    )
    data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
    df2 = DataFrame(
        columns=["UTCdatetime", "xyz"],
        data=data2,
        index=pd.date_range("20140628", periods=1),
    )
    res = df2[["UTCdatetime"]].combine_first(df1)
    exp = DataFrame(
        {
            "UTCdatetime": [
                pd.Timestamp("2010-01-01 01:01", tz="UTC"),
                pd.Timestamp("2012-12-12 12:12", tz="UTC"),
            ],
            "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
        },
        columns=["UTCdatetime", "abc"],
        index=pd.date_range("20140627", periods=2, freq="D"),
    )
    # Both columns keep their tz-aware dtype.
    assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
    assert res["abc"].dtype == "datetime64[ns, UTC]"

    tm.assert_frame_equal(res, exp)

    # see gh-10567
    dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
    df1 = DataFrame({"DATE": dts1})
    dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
    df2 = DataFrame({"DATE": dts2})

    res = df1.combine_first(df2)
    tm.assert_frame_equal(res, df1)
    assert res["DATE"].dtype == "datetime64[ns, UTC]"

    # NaT holes in a tz-aware column are filled from other, preserving tz.
    dts1 = pd.DatetimeIndex(
        ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
    )
    df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
    dts2 = pd.DatetimeIndex(
        ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
    )
    df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])

    res = df1.combine_first(df2)
    exp_dts = pd.DatetimeIndex(
        [
            "2011-01-01",
            "2012-01-01",
            "NaT",
            "2012-01-02",
            "2011-01-03",
            "2011-01-04",
        ],
        tz="US/Eastern",
    )
    exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
    tm.assert_frame_equal(res, exp)

    # different tz
    dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
    df1 = DataFrame({"DATE": dts1})
    dts2 = pd.date_range("2015-01-03", "2015-01-05")
    df2 = DataFrame({"DATE": dts2})

    # if df1 doesn't have NaN, keep its dtype
    res = df1.combine_first(df2)
    tm.assert_frame_equal(res, df1)
    assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"

    dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
    df1 = DataFrame({"DATE": dts1})
    dts2 = pd.date_range("2015-01-01", "2015-01-03")
    df2 = DataFrame({"DATE": dts2})

    # Mixing aware and naive timestamps falls back to object dtype.
    res = df1.combine_first(df2)
    exp_dts = [
        pd.Timestamp("2015-01-01", tz="US/Eastern"),
        pd.Timestamp("2015-01-02", tz="US/Eastern"),
        pd.Timestamp("2015-01-03"),
    ]
    exp = DataFrame({"DATE": exp_dts})
    tm.assert_frame_equal(res, exp)
    assert res["DATE"].dtype == "object"
|
| 311 |
+
|
| 312 |
+
def test_combine_first_timedelta(self):
|
| 313 |
+
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
|
| 314 |
+
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
|
| 315 |
+
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
|
| 316 |
+
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
|
| 317 |
+
|
| 318 |
+
res = df1.combine_first(df2)
|
| 319 |
+
exp_dts = pd.TimedeltaIndex(
|
| 320 |
+
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
|
| 321 |
+
)
|
| 322 |
+
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
|
| 323 |
+
tm.assert_frame_equal(res, exp)
|
| 324 |
+
assert res["TD"].dtype == "timedelta64[ns]"
|
| 325 |
+
|
| 326 |
+
def test_combine_first_period(self):
|
| 327 |
+
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
|
| 328 |
+
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
|
| 329 |
+
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
|
| 330 |
+
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
|
| 331 |
+
|
| 332 |
+
res = df1.combine_first(df2)
|
| 333 |
+
exp_dts = pd.PeriodIndex(
|
| 334 |
+
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
|
| 335 |
+
)
|
| 336 |
+
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
|
| 337 |
+
tm.assert_frame_equal(res, exp)
|
| 338 |
+
assert res["P"].dtype == data1.dtype
|
| 339 |
+
|
| 340 |
+
# different freq
|
| 341 |
+
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
|
| 342 |
+
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
|
| 343 |
+
|
| 344 |
+
res = df1.combine_first(df2)
|
| 345 |
+
exp_dts = [
|
| 346 |
+
pd.Period("2011-01", freq="M"),
|
| 347 |
+
pd.Period("2012-01-01", freq="D"),
|
| 348 |
+
pd.NaT,
|
| 349 |
+
pd.Period("2012-01-02", freq="D"),
|
| 350 |
+
pd.Period("2011-03", freq="M"),
|
| 351 |
+
pd.Period("2011-04", freq="M"),
|
| 352 |
+
]
|
| 353 |
+
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
|
| 354 |
+
tm.assert_frame_equal(res, exp)
|
| 355 |
+
assert res["P"].dtype == "object"
|
| 356 |
+
|
| 357 |
+
def test_combine_first_int(self):
|
| 358 |
+
# GH14687 - integer series that do no align exactly
|
| 359 |
+
|
| 360 |
+
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
|
| 361 |
+
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
|
| 362 |
+
|
| 363 |
+
result_12 = df1.combine_first(df2)
|
| 364 |
+
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
|
| 365 |
+
tm.assert_frame_equal(result_12, expected_12)
|
| 366 |
+
|
| 367 |
+
result_21 = df2.combine_first(df1)
|
| 368 |
+
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
|
| 369 |
+
tm.assert_frame_equal(result_21, expected_21)
|
| 370 |
+
|
| 371 |
+
@pytest.mark.parametrize("val", [1, 1.0])
|
| 372 |
+
def test_combine_first_with_asymmetric_other(self, val):
|
| 373 |
+
# see gh-20699
|
| 374 |
+
df1 = DataFrame({"isNum": [val]})
|
| 375 |
+
df2 = DataFrame({"isBool": [True]})
|
| 376 |
+
|
| 377 |
+
res = df1.combine_first(df2)
|
| 378 |
+
exp = DataFrame({"isBool": [True], "isNum": [val]})
|
| 379 |
+
|
| 380 |
+
tm.assert_frame_equal(res, exp)
|
| 381 |
+
|
| 382 |
+
def test_combine_first_string_dtype_only_na(self, nullable_string_dtype):
|
| 383 |
+
# GH: 37519
|
| 384 |
+
df = DataFrame(
|
| 385 |
+
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype
|
| 386 |
+
)
|
| 387 |
+
df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype=nullable_string_dtype)
|
| 388 |
+
df.set_index(["a", "b"], inplace=True)
|
| 389 |
+
df2.set_index(["a", "b"], inplace=True)
|
| 390 |
+
result = df.combine_first(df2)
|
| 391 |
+
expected = DataFrame(
|
| 392 |
+
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype
|
| 393 |
+
).set_index(["a", "b"])
|
| 394 |
+
tm.assert_frame_equal(result, expected)
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
@pytest.mark.parametrize(
|
| 398 |
+
"scalar1, scalar2",
|
| 399 |
+
[
|
| 400 |
+
(datetime(2020, 1, 1), datetime(2020, 1, 2)),
|
| 401 |
+
(pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")),
|
| 402 |
+
(pd.Timedelta("89 days"), pd.Timedelta("60 min")),
|
| 403 |
+
(pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")),
|
| 404 |
+
],
|
| 405 |
+
)
|
| 406 |
+
def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture):
|
| 407 |
+
# GH28481
|
| 408 |
+
na_value = nulls_fixture
|
| 409 |
+
|
| 410 |
+
frame = DataFrame([[na_value, na_value]], columns=["a", "b"])
|
| 411 |
+
other = DataFrame([[scalar1, scalar2]], columns=["b", "c"])
|
| 412 |
+
|
| 413 |
+
common_dtype = find_common_type([frame.dtypes["b"], other.dtypes["b"]])
|
| 414 |
+
|
| 415 |
+
if is_dtype_equal(common_dtype, "object") or frame.dtypes["b"] == other.dtypes["b"]:
|
| 416 |
+
val = scalar1
|
| 417 |
+
else:
|
| 418 |
+
val = na_value
|
| 419 |
+
|
| 420 |
+
result = frame.combine_first(other)
|
| 421 |
+
|
| 422 |
+
expected = DataFrame([[na_value, val, scalar2]], columns=["a", "b", "c"])
|
| 423 |
+
|
| 424 |
+
expected["b"] = expected["b"].astype(common_dtype)
|
| 425 |
+
|
| 426 |
+
tm.assert_frame_equal(result, expected)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def test_combine_first_timestamp_bug_NaT():
    # GH#28481: a NaT-only column combined with a datetime column from other.
    frame = DataFrame([[pd.NaT, pd.NaT]], columns=["a", "b"])
    other = DataFrame(
        [[datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["b", "c"]
    )

    expected = DataFrame(
        [[pd.NaT, datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["a", "b", "c"]
    )
    tm.assert_frame_equal(frame.combine_first(other), expected)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def test_combine_first_with_nan_multiindex():
    # GH#36562: NaN labels in a MultiIndex level align correctly in the union.
    mi1 = MultiIndex.from_arrays(
        [["b", "b", "c", "a", "b", np.nan], [1, 2, 3, 4, 5, 6]], names=["a", "b"]
    )
    left = DataFrame({"c": [1, 1, 1, 1, 1, 1]}, index=mi1)

    mi2 = MultiIndex.from_arrays(
        [["a", "b", "c", "a", "b", "d"], [1, 1, 1, 1, 1, 1]], names=["a", "b"]
    )
    right = DataFrame({"d": Series([1, 2, 3, 4, 5, 6], index=mi2)})

    res = left.combine_first(right)

    mi_expected = MultiIndex.from_arrays(
        [
            ["a", "a", "a", "b", "b", "b", "b", "c", "c", "d", np.nan],
            [1, 1, 4, 1, 1, 2, 5, 1, 3, 1, 6],
        ],
        names=["a", "b"],
    )
    expected = DataFrame(
        {
            "c": [np.nan, np.nan, 1, 1, 1, 1, 1, np.nan, 1, np.nan, 1],
            "d": [1.0, 4.0, np.nan, 2.0, 5.0, np.nan, np.nan, 3.0, np.nan, 6.0, np.nan],
        },
        index=mi_expected,
    )
    tm.assert_frame_equal(res, expected)
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
def test_combine_preserve_dtypes():
    # GH#7509: an int column with no NAs after combining must stay integer.
    df1 = DataFrame(
        {
            "A": Series(["a", "b"], index=range(2)),
            "B": Series(range(2), index=range(2)),
        }
    )
    df2 = DataFrame(
        {
            "B": Series(range(-1, 1), index=range(5, 7)),
            "C": Series(["a", "b"], index=range(5, 7)),
        }
    )

    expected = DataFrame(
        {
            "A": ["a", "b", np.nan, np.nan],
            "B": [0, 1, -1, 0],
            "C": [np.nan, np.nan, "a", "b"],
        },
        index=[0, 1, 5, 6],
    )
    tm.assert_frame_equal(df1.combine_first(df2), expected)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def test_combine_first_duplicates_rows_for_nan_index_values():
    # GH#39881: NaN index labels shared by both frames align instead of
    # producing duplicated rows.
    df1 = DataFrame(
        {"x": [9, 10, 11]},
        index=MultiIndex.from_arrays([[1, 2, 3], [np.nan, 5, 6]], names=["a", "b"]),
    )
    df2 = DataFrame(
        {"y": [12, 13, 14]},
        index=MultiIndex.from_arrays([[1, 2, 4], [np.nan, 5, 7]], names=["a", "b"]),
    )

    expected = DataFrame(
        {
            "x": [9.0, 10.0, 11.0, np.nan],
            "y": [12.0, 13.0, np.nan, 14.0],
        },
        index=MultiIndex.from_arrays(
            [[1, 2, 3, 4], [np.nan, 5.0, 6.0, 7.0]], names=["a", "b"]
        ),
    )
    tm.assert_frame_equal(df1.combine_first(df2), expected)
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def test_combine_first_int64_not_cast_to_float64():
|
| 521 |
+
# GH 28613
|
| 522 |
+
df_1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
|
| 523 |
+
df_2 = DataFrame({"A": [1, 20, 30], "B": [40, 50, 60], "C": [12, 34, 65]})
|
| 524 |
+
result = df_1.combine_first(df_2)
|
| 525 |
+
expected = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [12, 34, 65]})
|
| 526 |
+
tm.assert_frame_equal(result, expected)
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def test_midx_losing_dtype():
|
| 530 |
+
# GH#49830
|
| 531 |
+
midx = MultiIndex.from_arrays([[0, 0], [np.nan, np.nan]])
|
| 532 |
+
midx2 = MultiIndex.from_arrays([[1, 1], [np.nan, np.nan]])
|
| 533 |
+
df1 = DataFrame({"a": [None, 4]}, index=midx)
|
| 534 |
+
df2 = DataFrame({"a": [3, 3]}, index=midx2)
|
| 535 |
+
result = df1.combine_first(df2)
|
| 536 |
+
expected_midx = MultiIndex.from_arrays(
|
| 537 |
+
[[0, 0, 1, 1], [np.nan, np.nan, np.nan, np.nan]]
|
| 538 |
+
)
|
| 539 |
+
expected = DataFrame({"a": [np.nan, 4, 3, 3]}, index=expected_midx)
|
| 540 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestConvertDtypes:
|
| 11 |
+
@pytest.mark.parametrize(
|
| 12 |
+
"convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")]
|
| 13 |
+
)
|
| 14 |
+
def test_convert_dtypes(self, convert_integer, expected, string_storage):
|
| 15 |
+
# Specific types are tested in tests/series/test_dtypes.py
|
| 16 |
+
# Just check that it works for DataFrame here
|
| 17 |
+
df = pd.DataFrame(
|
| 18 |
+
{
|
| 19 |
+
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
|
| 20 |
+
"b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
|
| 21 |
+
}
|
| 22 |
+
)
|
| 23 |
+
with pd.option_context("string_storage", string_storage):
|
| 24 |
+
result = df.convert_dtypes(True, True, convert_integer, False)
|
| 25 |
+
expected = pd.DataFrame(
|
| 26 |
+
{
|
| 27 |
+
"a": pd.Series([1, 2, 3], dtype=expected),
|
| 28 |
+
"b": pd.Series(["x", "y", "z"], dtype=f"string[{string_storage}]"),
|
| 29 |
+
}
|
| 30 |
+
)
|
| 31 |
+
tm.assert_frame_equal(result, expected)
|
| 32 |
+
|
| 33 |
+
def test_convert_empty(self):
|
| 34 |
+
# Empty DataFrame can pass convert_dtypes, see GH#40393
|
| 35 |
+
empty_df = pd.DataFrame()
|
| 36 |
+
tm.assert_frame_equal(empty_df, empty_df.convert_dtypes())
|
| 37 |
+
|
| 38 |
+
def test_convert_dtypes_retain_column_names(self):
|
| 39 |
+
# GH#41435
|
| 40 |
+
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
|
| 41 |
+
df.columns.name = "cols"
|
| 42 |
+
|
| 43 |
+
result = df.convert_dtypes()
|
| 44 |
+
tm.assert_index_equal(result.columns, df.columns)
|
| 45 |
+
assert result.columns.name == "cols"
|
| 46 |
+
|
| 47 |
+
def test_pyarrow_dtype_backend(self):
|
| 48 |
+
pa = pytest.importorskip("pyarrow")
|
| 49 |
+
df = pd.DataFrame(
|
| 50 |
+
{
|
| 51 |
+
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
|
| 52 |
+
"b": pd.Series(["x", "y", None], dtype=np.dtype("O")),
|
| 53 |
+
"c": pd.Series([True, False, None], dtype=np.dtype("O")),
|
| 54 |
+
"d": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
|
| 55 |
+
"e": pd.Series(pd.date_range("2022", periods=3)),
|
| 56 |
+
"f": pd.Series(pd.date_range("2022", periods=3, tz="UTC").as_unit("s")),
|
| 57 |
+
"g": pd.Series(pd.timedelta_range("1D", periods=3)),
|
| 58 |
+
}
|
| 59 |
+
)
|
| 60 |
+
result = df.convert_dtypes(dtype_backend="pyarrow")
|
| 61 |
+
expected = pd.DataFrame(
|
| 62 |
+
{
|
| 63 |
+
"a": pd.arrays.ArrowExtensionArray(
|
| 64 |
+
pa.array([1, 2, 3], type=pa.int32())
|
| 65 |
+
),
|
| 66 |
+
"b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])),
|
| 67 |
+
"c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])),
|
| 68 |
+
"d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])),
|
| 69 |
+
"e": pd.arrays.ArrowExtensionArray(
|
| 70 |
+
pa.array(
|
| 71 |
+
[
|
| 72 |
+
datetime.datetime(2022, 1, 1),
|
| 73 |
+
datetime.datetime(2022, 1, 2),
|
| 74 |
+
datetime.datetime(2022, 1, 3),
|
| 75 |
+
],
|
| 76 |
+
type=pa.timestamp(unit="ns"),
|
| 77 |
+
)
|
| 78 |
+
),
|
| 79 |
+
"f": pd.arrays.ArrowExtensionArray(
|
| 80 |
+
pa.array(
|
| 81 |
+
[
|
| 82 |
+
datetime.datetime(2022, 1, 1),
|
| 83 |
+
datetime.datetime(2022, 1, 2),
|
| 84 |
+
datetime.datetime(2022, 1, 3),
|
| 85 |
+
],
|
| 86 |
+
type=pa.timestamp(unit="s", tz="UTC"),
|
| 87 |
+
)
|
| 88 |
+
),
|
| 89 |
+
"g": pd.arrays.ArrowExtensionArray(
|
| 90 |
+
pa.array(
|
| 91 |
+
[
|
| 92 |
+
datetime.timedelta(1),
|
| 93 |
+
datetime.timedelta(2),
|
| 94 |
+
datetime.timedelta(3),
|
| 95 |
+
],
|
| 96 |
+
type=pa.duration("ns"),
|
| 97 |
+
)
|
| 98 |
+
),
|
| 99 |
+
}
|
| 100 |
+
)
|
| 101 |
+
tm.assert_frame_equal(result, expected)
|
| 102 |
+
|
| 103 |
+
def test_pyarrow_dtype_backend_already_pyarrow(self):
|
| 104 |
+
pytest.importorskip("pyarrow")
|
| 105 |
+
expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]")
|
| 106 |
+
result = expected.convert_dtypes(dtype_backend="pyarrow")
|
| 107 |
+
tm.assert_frame_equal(result, expected)
|
| 108 |
+
|
| 109 |
+
def test_pyarrow_dtype_backend_from_pandas_nullable(self):
|
| 110 |
+
pa = pytest.importorskip("pyarrow")
|
| 111 |
+
df = pd.DataFrame(
|
| 112 |
+
{
|
| 113 |
+
"a": pd.Series([1, 2, None], dtype="Int32"),
|
| 114 |
+
"b": pd.Series(["x", "y", None], dtype="string[python]"),
|
| 115 |
+
"c": pd.Series([True, False, None], dtype="boolean"),
|
| 116 |
+
"d": pd.Series([None, 100.5, 200], dtype="Float64"),
|
| 117 |
+
}
|
| 118 |
+
)
|
| 119 |
+
result = df.convert_dtypes(dtype_backend="pyarrow")
|
| 120 |
+
expected = pd.DataFrame(
|
| 121 |
+
{
|
| 122 |
+
"a": pd.arrays.ArrowExtensionArray(
|
| 123 |
+
pa.array([1, 2, None], type=pa.int32())
|
| 124 |
+
),
|
| 125 |
+
"b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])),
|
| 126 |
+
"c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])),
|
| 127 |
+
"d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])),
|
| 128 |
+
}
|
| 129 |
+
)
|
| 130 |
+
tm.assert_frame_equal(result, expected)
|
| 131 |
+
|
| 132 |
+
def test_pyarrow_dtype_empty_object(self):
|
| 133 |
+
# GH 50970
|
| 134 |
+
pytest.importorskip("pyarrow")
|
| 135 |
+
expected = pd.DataFrame(columns=[0])
|
| 136 |
+
result = expected.convert_dtypes(dtype_backend="pyarrow")
|
| 137 |
+
tm.assert_frame_equal(result, expected)
|
| 138 |
+
|
| 139 |
+
def test_pyarrow_engine_lines_false(self):
|
| 140 |
+
# GH 48893
|
| 141 |
+
df = pd.DataFrame({"a": [1, 2, 3]})
|
| 142 |
+
msg = (
|
| 143 |
+
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
|
| 144 |
+
"'pyarrow' are allowed."
|
| 145 |
+
)
|
| 146 |
+
with pytest.raises(ValueError, match=msg):
|
| 147 |
+
df.convert_dtypes(dtype_backend="numpy")
|
| 148 |
+
|
| 149 |
+
def test_pyarrow_backend_no_conversion(self):
|
| 150 |
+
# GH#52872
|
| 151 |
+
pytest.importorskip("pyarrow")
|
| 152 |
+
df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"})
|
| 153 |
+
expected = df.copy()
|
| 154 |
+
result = df.convert_dtypes(
|
| 155 |
+
convert_floating=False,
|
| 156 |
+
convert_integer=False,
|
| 157 |
+
convert_boolean=False,
|
| 158 |
+
convert_string=False,
|
| 159 |
+
dtype_backend="pyarrow",
|
| 160 |
+
)
|
| 161 |
+
tm.assert_frame_equal(result, expected)
|
| 162 |
+
|
| 163 |
+
def test_convert_dtypes_pyarrow_to_np_nullable(self):
|
| 164 |
+
# GH 53648
|
| 165 |
+
pytest.importorskip("pyarrow")
|
| 166 |
+
ser = pd.DataFrame(range(2), dtype="int32[pyarrow]")
|
| 167 |
+
result = ser.convert_dtypes(dtype_backend="numpy_nullable")
|
| 168 |
+
expected = pd.DataFrame(range(2), dtype="Int32")
|
| 169 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_copy.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
from pandas import DataFrame
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestCopy:
|
| 11 |
+
@pytest.mark.parametrize("attr", ["index", "columns"])
|
| 12 |
+
def test_copy_index_name_checking(self, float_frame, attr):
|
| 13 |
+
# don't want to be able to modify the index stored elsewhere after
|
| 14 |
+
# making a copy
|
| 15 |
+
ind = getattr(float_frame, attr)
|
| 16 |
+
ind.name = None
|
| 17 |
+
cp = float_frame.copy()
|
| 18 |
+
getattr(cp, attr).name = "foo"
|
| 19 |
+
assert getattr(float_frame, attr).name is None
|
| 20 |
+
|
| 21 |
+
@td.skip_copy_on_write_invalid_test
|
| 22 |
+
def test_copy_cache(self):
|
| 23 |
+
# GH#31784 _item_cache not cleared on copy causes incorrect reads after updates
|
| 24 |
+
df = DataFrame({"a": [1]})
|
| 25 |
+
|
| 26 |
+
df["x"] = [0]
|
| 27 |
+
df["a"]
|
| 28 |
+
|
| 29 |
+
df.copy()
|
| 30 |
+
|
| 31 |
+
df["a"].values[0] = -1
|
| 32 |
+
|
| 33 |
+
tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]}))
|
| 34 |
+
|
| 35 |
+
df["y"] = [0]
|
| 36 |
+
|
| 37 |
+
assert df["a"].values[0] == -1
|
| 38 |
+
tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]}))
|
| 39 |
+
|
| 40 |
+
def test_copy(self, float_frame, float_string_frame):
|
| 41 |
+
cop = float_frame.copy()
|
| 42 |
+
cop["E"] = cop["A"]
|
| 43 |
+
assert "E" not in float_frame
|
| 44 |
+
|
| 45 |
+
# copy objects
|
| 46 |
+
copy = float_string_frame.copy()
|
| 47 |
+
assert copy._mgr is not float_string_frame._mgr
|
| 48 |
+
|
| 49 |
+
@td.skip_array_manager_invalid_test
|
| 50 |
+
def test_copy_consolidates(self):
|
| 51 |
+
# GH#42477
|
| 52 |
+
df = DataFrame(
|
| 53 |
+
{
|
| 54 |
+
"a": np.random.randint(0, 100, size=55),
|
| 55 |
+
"b": np.random.randint(0, 100, size=55),
|
| 56 |
+
}
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
for i in range(0, 10):
|
| 60 |
+
df.loc[:, f"n_{i}"] = np.random.randint(0, 100, size=55)
|
| 61 |
+
|
| 62 |
+
assert len(df._mgr.blocks) == 11
|
| 63 |
+
result = df.copy()
|
| 64 |
+
assert len(result._mgr.blocks) == 1
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_count.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas import (
|
| 2 |
+
DataFrame,
|
| 3 |
+
Series,
|
| 4 |
+
)
|
| 5 |
+
import pandas._testing as tm
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TestDataFrameCount:
|
| 9 |
+
def test_count(self):
|
| 10 |
+
# corner case
|
| 11 |
+
frame = DataFrame()
|
| 12 |
+
ct1 = frame.count(1)
|
| 13 |
+
assert isinstance(ct1, Series)
|
| 14 |
+
|
| 15 |
+
ct2 = frame.count(0)
|
| 16 |
+
assert isinstance(ct2, Series)
|
| 17 |
+
|
| 18 |
+
# GH#423
|
| 19 |
+
df = DataFrame(index=range(10))
|
| 20 |
+
result = df.count(1)
|
| 21 |
+
expected = Series(0, index=df.index)
|
| 22 |
+
tm.assert_series_equal(result, expected)
|
| 23 |
+
|
| 24 |
+
df = DataFrame(columns=range(10))
|
| 25 |
+
result = df.count(0)
|
| 26 |
+
expected = Series(0, index=df.columns)
|
| 27 |
+
tm.assert_series_equal(result, expected)
|
| 28 |
+
|
| 29 |
+
df = DataFrame()
|
| 30 |
+
result = df.count()
|
| 31 |
+
expected = Series(dtype="int64")
|
| 32 |
+
tm.assert_series_equal(result, expected)
|
| 33 |
+
|
| 34 |
+
def test_count_objects(self, float_string_frame):
|
| 35 |
+
dm = DataFrame(float_string_frame._series)
|
| 36 |
+
df = DataFrame(float_string_frame._series)
|
| 37 |
+
|
| 38 |
+
tm.assert_series_equal(dm.count(), df.count())
|
| 39 |
+
tm.assert_series_equal(dm.count(1), df.count(1))
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_cov_corr.py
ADDED
|
@@ -0,0 +1,433 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas.util._test_decorators as td
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
Series,
|
| 12 |
+
isna,
|
| 13 |
+
)
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class TestDataFrameCov:
|
| 18 |
+
def test_cov(self, float_frame, float_string_frame):
|
| 19 |
+
# min_periods no NAs (corner case)
|
| 20 |
+
expected = float_frame.cov()
|
| 21 |
+
result = float_frame.cov(min_periods=len(float_frame))
|
| 22 |
+
|
| 23 |
+
tm.assert_frame_equal(expected, result)
|
| 24 |
+
|
| 25 |
+
result = float_frame.cov(min_periods=len(float_frame) + 1)
|
| 26 |
+
assert isna(result.values).all()
|
| 27 |
+
|
| 28 |
+
# with NAs
|
| 29 |
+
frame = float_frame.copy()
|
| 30 |
+
frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
|
| 31 |
+
frame.iloc[5:10, frame.columns.get_loc("B")] = np.nan
|
| 32 |
+
result = frame.cov(min_periods=len(frame) - 8)
|
| 33 |
+
expected = frame.cov()
|
| 34 |
+
expected.loc["A", "B"] = np.nan
|
| 35 |
+
expected.loc["B", "A"] = np.nan
|
| 36 |
+
tm.assert_frame_equal(result, expected)
|
| 37 |
+
|
| 38 |
+
# regular
|
| 39 |
+
result = frame.cov()
|
| 40 |
+
expected = frame["A"].cov(frame["C"])
|
| 41 |
+
tm.assert_almost_equal(result["A"]["C"], expected)
|
| 42 |
+
|
| 43 |
+
# fails on non-numeric types
|
| 44 |
+
with pytest.raises(ValueError, match="could not convert string to float"):
|
| 45 |
+
float_string_frame.cov()
|
| 46 |
+
result = float_string_frame.cov(numeric_only=True)
|
| 47 |
+
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
|
| 48 |
+
tm.assert_frame_equal(result, expected)
|
| 49 |
+
|
| 50 |
+
# Single column frame
|
| 51 |
+
df = DataFrame(np.linspace(0.0, 1.0, 10))
|
| 52 |
+
result = df.cov()
|
| 53 |
+
expected = DataFrame(
|
| 54 |
+
np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
|
| 55 |
+
)
|
| 56 |
+
tm.assert_frame_equal(result, expected)
|
| 57 |
+
df.loc[0] = np.nan
|
| 58 |
+
result = df.cov()
|
| 59 |
+
expected = DataFrame(
|
| 60 |
+
np.cov(df.values[1:].T).reshape((1, 1)),
|
| 61 |
+
index=df.columns,
|
| 62 |
+
columns=df.columns,
|
| 63 |
+
)
|
| 64 |
+
tm.assert_frame_equal(result, expected)
|
| 65 |
+
|
| 66 |
+
@pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])
|
| 67 |
+
def test_cov_ddof(self, test_ddof):
|
| 68 |
+
# GH#34611
|
| 69 |
+
np_array1 = np.random.rand(10)
|
| 70 |
+
np_array2 = np.random.rand(10)
|
| 71 |
+
df = DataFrame({0: np_array1, 1: np_array2})
|
| 72 |
+
result = df.cov(ddof=test_ddof)
|
| 73 |
+
expected_np = np.cov(np_array1, np_array2, ddof=test_ddof)
|
| 74 |
+
expected = DataFrame(expected_np)
|
| 75 |
+
tm.assert_frame_equal(result, expected)
|
| 76 |
+
|
| 77 |
+
@pytest.mark.parametrize(
|
| 78 |
+
"other_column", [pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])]
|
| 79 |
+
)
|
| 80 |
+
def test_cov_nullable_integer(self, other_column):
|
| 81 |
+
# https://github.com/pandas-dev/pandas/issues/33803
|
| 82 |
+
data = DataFrame({"a": pd.array([1, 2, None]), "b": other_column})
|
| 83 |
+
result = data.cov()
|
| 84 |
+
arr = np.array([[0.5, 0.5], [0.5, 1.0]])
|
| 85 |
+
expected = DataFrame(arr, columns=["a", "b"], index=["a", "b"])
|
| 86 |
+
tm.assert_frame_equal(result, expected)
|
| 87 |
+
|
| 88 |
+
@pytest.mark.parametrize("numeric_only", [True, False])
|
| 89 |
+
def test_cov_numeric_only(self, numeric_only):
|
| 90 |
+
# when dtypes of pandas series are different
|
| 91 |
+
# then ndarray will have dtype=object,
|
| 92 |
+
# so it need to be properly handled
|
| 93 |
+
df = DataFrame({"a": [1, 0], "c": ["x", "y"]})
|
| 94 |
+
expected = DataFrame(0.5, index=["a"], columns=["a"])
|
| 95 |
+
if numeric_only:
|
| 96 |
+
result = df.cov(numeric_only=numeric_only)
|
| 97 |
+
tm.assert_frame_equal(result, expected)
|
| 98 |
+
else:
|
| 99 |
+
with pytest.raises(ValueError, match="could not convert string to float"):
|
| 100 |
+
df.cov(numeric_only=numeric_only)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class TestDataFrameCorr:
|
| 104 |
+
# DataFrame.corr(), as opposed to DataFrame.corrwith
|
| 105 |
+
|
| 106 |
+
@pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
|
| 107 |
+
@td.skip_if_no_scipy
|
| 108 |
+
def test_corr_scipy_method(self, float_frame, method):
|
| 109 |
+
float_frame.loc[float_frame.index[:5], "A"] = np.nan
|
| 110 |
+
float_frame.loc[float_frame.index[5:10], "B"] = np.nan
|
| 111 |
+
float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20]
|
| 112 |
+
|
| 113 |
+
correls = float_frame.corr(method=method)
|
| 114 |
+
expected = float_frame["A"].corr(float_frame["C"], method=method)
|
| 115 |
+
tm.assert_almost_equal(correls["A"]["C"], expected)
|
| 116 |
+
|
| 117 |
+
# ---------------------------------------------------------------------
|
| 118 |
+
|
| 119 |
+
def test_corr_non_numeric(self, float_string_frame):
|
| 120 |
+
with pytest.raises(ValueError, match="could not convert string to float"):
|
| 121 |
+
float_string_frame.corr()
|
| 122 |
+
result = float_string_frame.corr(numeric_only=True)
|
| 123 |
+
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
|
| 124 |
+
tm.assert_frame_equal(result, expected)
|
| 125 |
+
|
| 126 |
+
@td.skip_if_no_scipy
|
| 127 |
+
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
|
| 128 |
+
def test_corr_nooverlap(self, meth):
|
| 129 |
+
# nothing in common
|
| 130 |
+
df = DataFrame(
|
| 131 |
+
{
|
| 132 |
+
"A": [1, 1.5, 1, np.nan, np.nan, np.nan],
|
| 133 |
+
"B": [np.nan, np.nan, np.nan, 1, 1.5, 1],
|
| 134 |
+
"C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
|
| 135 |
+
}
|
| 136 |
+
)
|
| 137 |
+
rs = df.corr(meth)
|
| 138 |
+
assert isna(rs.loc["A", "B"])
|
| 139 |
+
assert isna(rs.loc["B", "A"])
|
| 140 |
+
assert rs.loc["A", "A"] == 1
|
| 141 |
+
assert rs.loc["B", "B"] == 1
|
| 142 |
+
assert isna(rs.loc["C", "C"])
|
| 143 |
+
|
| 144 |
+
@pytest.mark.parametrize("meth", ["pearson", "spearman"])
|
| 145 |
+
def test_corr_constant(self, meth):
|
| 146 |
+
# constant --> all NA
|
| 147 |
+
df = DataFrame(
|
| 148 |
+
{
|
| 149 |
+
"A": [1, 1, 1, np.nan, np.nan, np.nan],
|
| 150 |
+
"B": [np.nan, np.nan, np.nan, 1, 1, 1],
|
| 151 |
+
}
|
| 152 |
+
)
|
| 153 |
+
rs = df.corr(meth)
|
| 154 |
+
assert isna(rs.values).all()
|
| 155 |
+
|
| 156 |
+
@td.skip_if_no_scipy
|
| 157 |
+
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
|
| 158 |
+
def test_corr_int_and_boolean(self, meth):
|
| 159 |
+
# when dtypes of pandas series are different
|
| 160 |
+
# then ndarray will have dtype=object,
|
| 161 |
+
# so it need to be properly handled
|
| 162 |
+
df = DataFrame({"a": [True, False], "b": [1, 0]})
|
| 163 |
+
|
| 164 |
+
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
|
| 165 |
+
|
| 166 |
+
with warnings.catch_warnings(record=True):
|
| 167 |
+
warnings.simplefilter("ignore", RuntimeWarning)
|
| 168 |
+
result = df.corr(meth)
|
| 169 |
+
tm.assert_frame_equal(result, expected)
|
| 170 |
+
|
| 171 |
+
@pytest.mark.parametrize("method", ["cov", "corr"])
|
| 172 |
+
def test_corr_cov_independent_index_column(self, method):
|
| 173 |
+
# GH#14617
|
| 174 |
+
df = DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd"))
|
| 175 |
+
result = getattr(df, method)()
|
| 176 |
+
assert result.index is not result.columns
|
| 177 |
+
assert result.index.equals(result.columns)
|
| 178 |
+
|
| 179 |
+
def test_corr_invalid_method(self):
|
| 180 |
+
# GH#22298
|
| 181 |
+
df = DataFrame(np.random.normal(size=(10, 2)))
|
| 182 |
+
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
|
| 183 |
+
with pytest.raises(ValueError, match=msg):
|
| 184 |
+
df.corr(method="____")
|
| 185 |
+
|
| 186 |
+
def test_corr_int(self):
|
| 187 |
+
# dtypes other than float64 GH#1761
|
| 188 |
+
df = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
|
| 189 |
+
|
| 190 |
+
df.cov()
|
| 191 |
+
df.corr()
|
| 192 |
+
|
| 193 |
+
@td.skip_if_no_scipy
|
| 194 |
+
@pytest.mark.parametrize(
|
| 195 |
+
"nullable_column", [pd.array([1, 2, 3]), pd.array([1, 2, None])]
|
| 196 |
+
)
|
| 197 |
+
@pytest.mark.parametrize(
|
| 198 |
+
"other_column",
|
| 199 |
+
[pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, np.nan])],
|
| 200 |
+
)
|
| 201 |
+
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
|
| 202 |
+
def test_corr_nullable_integer(self, nullable_column, other_column, method):
|
| 203 |
+
# https://github.com/pandas-dev/pandas/issues/33803
|
| 204 |
+
data = DataFrame({"a": nullable_column, "b": other_column})
|
| 205 |
+
result = data.corr(method=method)
|
| 206 |
+
expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
|
| 207 |
+
tm.assert_frame_equal(result, expected)
|
| 208 |
+
|
| 209 |
+
def test_corr_item_cache(self, using_copy_on_write):
|
| 210 |
+
# Check that corr does not lead to incorrect entries in item_cache
|
| 211 |
+
|
| 212 |
+
df = DataFrame({"A": range(10)})
|
| 213 |
+
df["B"] = range(10)[::-1]
|
| 214 |
+
|
| 215 |
+
ser = df["A"] # populate item_cache
|
| 216 |
+
assert len(df._mgr.arrays) == 2 # i.e. 2 blocks
|
| 217 |
+
|
| 218 |
+
_ = df.corr(numeric_only=True)
|
| 219 |
+
|
| 220 |
+
if using_copy_on_write:
|
| 221 |
+
ser.iloc[0] = 99
|
| 222 |
+
assert df.loc[0, "A"] == 0
|
| 223 |
+
else:
|
| 224 |
+
# Check that the corr didn't break link between ser and df
|
| 225 |
+
ser.values[0] = 99
|
| 226 |
+
assert df.loc[0, "A"] == 99
|
| 227 |
+
assert df["A"] is ser
|
| 228 |
+
assert df.values[0, 0] == 99
|
| 229 |
+
|
| 230 |
+
@pytest.mark.parametrize("length", [2, 20, 200, 2000])
|
| 231 |
+
def test_corr_for_constant_columns(self, length):
|
| 232 |
+
# GH: 37448
|
| 233 |
+
df = DataFrame(length * [[0.4, 0.1]], columns=["A", "B"])
|
| 234 |
+
result = df.corr()
|
| 235 |
+
expected = DataFrame(
|
| 236 |
+
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"]
|
| 237 |
+
)
|
| 238 |
+
tm.assert_frame_equal(result, expected)
|
| 239 |
+
|
| 240 |
+
def test_calc_corr_small_numbers(self):
|
| 241 |
+
# GH: 37452
|
| 242 |
+
df = DataFrame(
|
| 243 |
+
{"A": [1.0e-20, 2.0e-20, 3.0e-20], "B": [1.0e-20, 2.0e-20, 3.0e-20]}
|
| 244 |
+
)
|
| 245 |
+
result = df.corr()
|
| 246 |
+
expected = DataFrame({"A": [1.0, 1.0], "B": [1.0, 1.0]}, index=["A", "B"])
|
| 247 |
+
tm.assert_frame_equal(result, expected)
|
| 248 |
+
|
| 249 |
+
@td.skip_if_no_scipy
|
| 250 |
+
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
|
| 251 |
+
def test_corr_min_periods_greater_than_length(self, method):
|
| 252 |
+
df = DataFrame({"A": [1, 2], "B": [1, 2]})
|
| 253 |
+
result = df.corr(method=method, min_periods=3)
|
| 254 |
+
expected = DataFrame(
|
| 255 |
+
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"]
|
| 256 |
+
)
|
| 257 |
+
tm.assert_frame_equal(result, expected)
|
| 258 |
+
|
| 259 |
+
@td.skip_if_no_scipy
|
| 260 |
+
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
|
| 261 |
+
@pytest.mark.parametrize("numeric_only", [True, False])
|
| 262 |
+
def test_corr_numeric_only(self, meth, numeric_only):
|
| 263 |
+
# when dtypes of pandas series are different
|
| 264 |
+
# then ndarray will have dtype=object,
|
| 265 |
+
# so it need to be properly handled
|
| 266 |
+
df = DataFrame({"a": [1, 0], "b": [1, 0], "c": ["x", "y"]})
|
| 267 |
+
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
|
| 268 |
+
if numeric_only:
|
| 269 |
+
result = df.corr(meth, numeric_only=numeric_only)
|
| 270 |
+
tm.assert_frame_equal(result, expected)
|
| 271 |
+
else:
|
| 272 |
+
with pytest.raises(ValueError, match="could not convert string to float"):
|
| 273 |
+
df.corr(meth, numeric_only=numeric_only)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
class TestDataFrameCorrWith:
|
| 277 |
+
def test_corrwith(self, datetime_frame):
|
| 278 |
+
a = datetime_frame
|
| 279 |
+
noise = Series(np.random.randn(len(a)), index=a.index)
|
| 280 |
+
|
| 281 |
+
b = datetime_frame.add(noise, axis=0)
|
| 282 |
+
|
| 283 |
+
# make sure order does not matter
|
| 284 |
+
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
|
| 285 |
+
del b["B"]
|
| 286 |
+
|
| 287 |
+
colcorr = a.corrwith(b, axis=0)
|
| 288 |
+
tm.assert_almost_equal(colcorr["A"], a["A"].corr(b["A"]))
|
| 289 |
+
|
| 290 |
+
rowcorr = a.corrwith(b, axis=1)
|
| 291 |
+
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
|
| 292 |
+
|
| 293 |
+
dropped = a.corrwith(b, axis=0, drop=True)
|
| 294 |
+
tm.assert_almost_equal(dropped["A"], a["A"].corr(b["A"]))
|
| 295 |
+
assert "B" not in dropped
|
| 296 |
+
|
| 297 |
+
dropped = a.corrwith(b, axis=1, drop=True)
|
| 298 |
+
assert a.index[-1] not in dropped.index
|
| 299 |
+
|
| 300 |
+
# non time-series data
|
| 301 |
+
index = ["a", "b", "c", "d", "e"]
|
| 302 |
+
columns = ["one", "two", "three", "four"]
|
| 303 |
+
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
|
| 304 |
+
df2 = DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
|
| 305 |
+
correls = df1.corrwith(df2, axis=1)
|
| 306 |
+
for row in index[:4]:
|
| 307 |
+
tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row]))
|
| 308 |
+
|
| 309 |
+
def test_corrwith_with_objects(self):
|
| 310 |
+
df1 = tm.makeTimeDataFrame()
|
| 311 |
+
df2 = tm.makeTimeDataFrame()
|
| 312 |
+
cols = ["A", "B", "C", "D"]
|
| 313 |
+
|
| 314 |
+
df1["obj"] = "foo"
|
| 315 |
+
df2["obj"] = "bar"
|
| 316 |
+
|
| 317 |
+
with pytest.raises(TypeError, match="Could not convert"):
|
| 318 |
+
df1.corrwith(df2)
|
| 319 |
+
result = df1.corrwith(df2, numeric_only=True)
|
| 320 |
+
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
|
| 321 |
+
tm.assert_series_equal(result, expected)
|
| 322 |
+
|
| 323 |
+
with pytest.raises(TypeError, match="unsupported operand type"):
|
| 324 |
+
df1.corrwith(df2, axis=1)
|
| 325 |
+
result = df1.corrwith(df2, axis=1, numeric_only=True)
|
| 326 |
+
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
|
| 327 |
+
tm.assert_series_equal(result, expected)
|
| 328 |
+
|
| 329 |
+
def test_corrwith_series(self, datetime_frame):
|
| 330 |
+
result = datetime_frame.corrwith(datetime_frame["A"])
|
| 331 |
+
expected = datetime_frame.apply(datetime_frame["A"].corr)
|
| 332 |
+
|
| 333 |
+
tm.assert_series_equal(result, expected)
|
| 334 |
+
|
| 335 |
+
def test_corrwith_matches_corrcoef(self):
|
| 336 |
+
df1 = DataFrame(np.arange(10000), columns=["a"])
|
| 337 |
+
df2 = DataFrame(np.arange(10000) ** 2, columns=["a"])
|
| 338 |
+
c1 = df1.corrwith(df2)["a"]
|
| 339 |
+
c2 = np.corrcoef(df1["a"], df2["a"])[0][1]
|
| 340 |
+
|
| 341 |
+
tm.assert_almost_equal(c1, c2)
|
| 342 |
+
assert c1 < 1
|
| 343 |
+
|
| 344 |
+
@pytest.mark.parametrize("numeric_only", [True, False])
|
| 345 |
+
def test_corrwith_mixed_dtypes(self, numeric_only):
|
| 346 |
+
# GH#18570
|
| 347 |
+
df = DataFrame(
|
| 348 |
+
{"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]}
|
| 349 |
+
)
|
| 350 |
+
s = Series([0, 6, 7, 3])
|
| 351 |
+
if numeric_only:
|
| 352 |
+
result = df.corrwith(s, numeric_only=numeric_only)
|
| 353 |
+
corrs = [df["a"].corr(s), df["b"].corr(s)]
|
| 354 |
+
expected = Series(data=corrs, index=["a", "b"])
|
| 355 |
+
tm.assert_series_equal(result, expected)
|
| 356 |
+
else:
|
| 357 |
+
with pytest.raises(
|
| 358 |
+
TypeError,
|
| 359 |
+
match=r"unsupported operand type\(s\) for /: 'str' and 'int'",
|
| 360 |
+
):
|
| 361 |
+
df.corrwith(s, numeric_only=numeric_only)
|
| 362 |
+
|
| 363 |
+
def test_corrwith_index_intersection(self):
|
| 364 |
+
df1 = DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
|
| 365 |
+
df2 = DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
|
| 366 |
+
|
| 367 |
+
result = df1.corrwith(df2, drop=True).index.sort_values()
|
| 368 |
+
expected = df1.columns.intersection(df2.columns).sort_values()
|
| 369 |
+
tm.assert_index_equal(result, expected)
|
| 370 |
+
|
| 371 |
+
def test_corrwith_index_union(self):
|
| 372 |
+
df1 = DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
|
| 373 |
+
df2 = DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
|
| 374 |
+
|
| 375 |
+
result = df1.corrwith(df2, drop=False).index.sort_values()
|
| 376 |
+
expected = df1.columns.union(df2.columns).sort_values()
|
| 377 |
+
tm.assert_index_equal(result, expected)
|
| 378 |
+
|
| 379 |
+
def test_corrwith_dup_cols(self):
|
| 380 |
+
# GH#21925
|
| 381 |
+
df1 = DataFrame(np.vstack([np.arange(10)] * 3).T)
|
| 382 |
+
df2 = df1.copy()
|
| 383 |
+
df2 = pd.concat((df2, df2[0]), axis=1)
|
| 384 |
+
|
| 385 |
+
result = df1.corrwith(df2)
|
| 386 |
+
expected = Series(np.ones(4), index=[0, 0, 1, 2])
|
| 387 |
+
tm.assert_series_equal(result, expected)
|
| 388 |
+
|
| 389 |
+
def test_corr_numerical_instabilities(self):
|
| 390 |
+
# GH#45640
|
| 391 |
+
df = DataFrame([[0.2, 0.4], [0.4, 0.2]])
|
| 392 |
+
result = df.corr()
|
| 393 |
+
expected = DataFrame({0: [1.0, -1.0], 1: [-1.0, 1.0]})
|
| 394 |
+
tm.assert_frame_equal(result - 1, expected - 1, atol=1e-17)
|
| 395 |
+
|
| 396 |
+
@td.skip_if_no_scipy
|
| 397 |
+
def test_corrwith_spearman(self):
|
| 398 |
+
# GH#21925
|
| 399 |
+
df = DataFrame(np.random.random(size=(100, 3)))
|
| 400 |
+
result = df.corrwith(df**2, method="spearman")
|
| 401 |
+
expected = Series(np.ones(len(result)))
|
| 402 |
+
tm.assert_series_equal(result, expected)
|
| 403 |
+
|
| 404 |
+
@td.skip_if_no_scipy
|
| 405 |
+
def test_corrwith_kendall(self):
|
| 406 |
+
# GH#21925
|
| 407 |
+
df = DataFrame(np.random.random(size=(100, 3)))
|
| 408 |
+
result = df.corrwith(df**2, method="kendall")
|
| 409 |
+
expected = Series(np.ones(len(result)))
|
| 410 |
+
tm.assert_series_equal(result, expected)
|
| 411 |
+
|
| 412 |
+
@td.skip_if_no_scipy
|
| 413 |
+
def test_corrwith_spearman_with_tied_data(self):
|
| 414 |
+
# GH#48826
|
| 415 |
+
df1 = DataFrame(
|
| 416 |
+
{
|
| 417 |
+
"A": [1, np.nan, 7, 8],
|
| 418 |
+
"B": [False, True, True, False],
|
| 419 |
+
"C": [10, 4, 9, 3],
|
| 420 |
+
}
|
| 421 |
+
)
|
| 422 |
+
df2 = df1[["B", "C"]]
|
| 423 |
+
result = (df1 + 1).corrwith(df2.B, method="spearman")
|
| 424 |
+
expected = Series([0.0, 1.0, 0.0], index=["A", "B", "C"])
|
| 425 |
+
tm.assert_series_equal(result, expected)
|
| 426 |
+
|
| 427 |
+
df_bool = DataFrame(
|
| 428 |
+
{"A": [True, True, False, False], "B": [True, False, False, True]}
|
| 429 |
+
)
|
| 430 |
+
ser_bool = Series([True, True, False, True])
|
| 431 |
+
result = df_bool.corrwith(ser_bool)
|
| 432 |
+
expected = Series([0.57735, 0.57735], index=["A", "B"])
|
| 433 |
+
tm.assert_series_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_diff.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
DataFrame,
|
| 7 |
+
Series,
|
| 8 |
+
Timestamp,
|
| 9 |
+
date_range,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestDataFrameDiff:
|
| 15 |
+
def test_diff_requires_integer(self):
|
| 16 |
+
df = DataFrame(np.random.randn(2, 2))
|
| 17 |
+
with pytest.raises(ValueError, match="periods must be an integer"):
|
| 18 |
+
df.diff(1.5)
|
| 19 |
+
|
| 20 |
+
# GH#44572 np.int64 is accepted
|
| 21 |
+
@pytest.mark.parametrize("num", [1, np.int64(1)])
|
| 22 |
+
def test_diff(self, datetime_frame, num):
|
| 23 |
+
df = datetime_frame
|
| 24 |
+
the_diff = df.diff(num)
|
| 25 |
+
|
| 26 |
+
expected = df["A"] - df["A"].shift(num)
|
| 27 |
+
tm.assert_series_equal(the_diff["A"], expected)
|
| 28 |
+
|
| 29 |
+
def test_diff_int_dtype(self):
|
| 30 |
+
# int dtype
|
| 31 |
+
a = 10_000_000_000_000_000
|
| 32 |
+
b = a + 1
|
| 33 |
+
ser = Series([a, b])
|
| 34 |
+
|
| 35 |
+
rs = DataFrame({"s": ser}).diff()
|
| 36 |
+
assert rs.s[1] == 1
|
| 37 |
+
|
| 38 |
+
def test_diff_mixed_numeric(self, datetime_frame):
|
| 39 |
+
# mixed numeric
|
| 40 |
+
tf = datetime_frame.astype("float32")
|
| 41 |
+
the_diff = tf.diff(1)
|
| 42 |
+
tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
|
| 43 |
+
|
| 44 |
+
def test_diff_axis1_nonconsolidated(self):
|
| 45 |
+
# GH#10907
|
| 46 |
+
df = DataFrame({"y": Series([2]), "z": Series([3])})
|
| 47 |
+
df.insert(0, "x", 1)
|
| 48 |
+
result = df.diff(axis=1)
|
| 49 |
+
expected = DataFrame({"x": np.nan, "y": Series(1), "z": Series(1)})
|
| 50 |
+
tm.assert_frame_equal(result, expected)
|
| 51 |
+
|
| 52 |
+
def test_diff_timedelta64_with_nat(self):
|
| 53 |
+
# GH#32441
|
| 54 |
+
arr = np.arange(6).reshape(3, 2).astype("timedelta64[ns]")
|
| 55 |
+
arr[:, 0] = np.timedelta64("NaT", "ns")
|
| 56 |
+
|
| 57 |
+
df = DataFrame(arr)
|
| 58 |
+
result = df.diff(1, axis=0)
|
| 59 |
+
|
| 60 |
+
expected = DataFrame({0: df[0], 1: [pd.NaT, pd.Timedelta(2), pd.Timedelta(2)]})
|
| 61 |
+
tm.assert_equal(result, expected)
|
| 62 |
+
|
| 63 |
+
result = df.diff(0)
|
| 64 |
+
expected = df - df
|
| 65 |
+
assert expected[0].isna().all()
|
| 66 |
+
tm.assert_equal(result, expected)
|
| 67 |
+
|
| 68 |
+
result = df.diff(-1, axis=1)
|
| 69 |
+
expected = df * np.nan
|
| 70 |
+
tm.assert_equal(result, expected)
|
| 71 |
+
|
| 72 |
+
@pytest.mark.parametrize("tz", [None, "UTC"])
|
| 73 |
+
def test_diff_datetime_axis0_with_nat(self, tz):
|
| 74 |
+
# GH#32441
|
| 75 |
+
dti = pd.DatetimeIndex(["NaT", "2019-01-01", "2019-01-02"], tz=tz)
|
| 76 |
+
ser = Series(dti)
|
| 77 |
+
|
| 78 |
+
df = ser.to_frame()
|
| 79 |
+
|
| 80 |
+
result = df.diff()
|
| 81 |
+
ex_index = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta(days=1)])
|
| 82 |
+
expected = Series(ex_index).to_frame()
|
| 83 |
+
tm.assert_frame_equal(result, expected)
|
| 84 |
+
|
| 85 |
+
@pytest.mark.parametrize("tz", [None, "UTC"])
|
| 86 |
+
def test_diff_datetime_with_nat_zero_periods(self, tz):
|
| 87 |
+
# diff on NaT values should give NaT, not timedelta64(0)
|
| 88 |
+
dti = date_range("2016-01-01", periods=4, tz=tz)
|
| 89 |
+
ser = Series(dti)
|
| 90 |
+
df = ser.to_frame()
|
| 91 |
+
|
| 92 |
+
df[1] = ser.copy()
|
| 93 |
+
|
| 94 |
+
df.iloc[:, 0] = pd.NaT
|
| 95 |
+
|
| 96 |
+
expected = df - df
|
| 97 |
+
assert expected[0].isna().all()
|
| 98 |
+
|
| 99 |
+
result = df.diff(0, axis=0)
|
| 100 |
+
tm.assert_frame_equal(result, expected)
|
| 101 |
+
|
| 102 |
+
result = df.diff(0, axis=1)
|
| 103 |
+
tm.assert_frame_equal(result, expected)
|
| 104 |
+
|
| 105 |
+
@pytest.mark.parametrize("tz", [None, "UTC"])
|
| 106 |
+
def test_diff_datetime_axis0(self, tz):
|
| 107 |
+
# GH#18578
|
| 108 |
+
df = DataFrame(
|
| 109 |
+
{
|
| 110 |
+
0: date_range("2010", freq="D", periods=2, tz=tz),
|
| 111 |
+
1: date_range("2010", freq="D", periods=2, tz=tz),
|
| 112 |
+
}
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
result = df.diff(axis=0)
|
| 116 |
+
expected = DataFrame(
|
| 117 |
+
{
|
| 118 |
+
0: pd.TimedeltaIndex(["NaT", "1 days"]),
|
| 119 |
+
1: pd.TimedeltaIndex(["NaT", "1 days"]),
|
| 120 |
+
}
|
| 121 |
+
)
|
| 122 |
+
tm.assert_frame_equal(result, expected)
|
| 123 |
+
|
| 124 |
+
@pytest.mark.parametrize("tz", [None, "UTC"])
|
| 125 |
+
def test_diff_datetime_axis1(self, tz):
|
| 126 |
+
# GH#18578
|
| 127 |
+
df = DataFrame(
|
| 128 |
+
{
|
| 129 |
+
0: date_range("2010", freq="D", periods=2, tz=tz),
|
| 130 |
+
1: date_range("2010", freq="D", periods=2, tz=tz),
|
| 131 |
+
}
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
result = df.diff(axis=1)
|
| 135 |
+
expected = DataFrame(
|
| 136 |
+
{
|
| 137 |
+
0: pd.TimedeltaIndex(["NaT", "NaT"]),
|
| 138 |
+
1: pd.TimedeltaIndex(["0 days", "0 days"]),
|
| 139 |
+
}
|
| 140 |
+
)
|
| 141 |
+
tm.assert_frame_equal(result, expected)
|
| 142 |
+
|
| 143 |
+
def test_diff_timedelta(self):
|
| 144 |
+
# GH#4533
|
| 145 |
+
df = DataFrame(
|
| 146 |
+
{
|
| 147 |
+
"time": [Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
|
| 148 |
+
"value": [1.0, 2.0],
|
| 149 |
+
}
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
res = df.diff()
|
| 153 |
+
exp = DataFrame(
|
| 154 |
+
[[pd.NaT, np.nan], [pd.Timedelta("00:01:00"), 1]], columns=["time", "value"]
|
| 155 |
+
)
|
| 156 |
+
tm.assert_frame_equal(res, exp)
|
| 157 |
+
|
| 158 |
+
def test_diff_mixed_dtype(self):
|
| 159 |
+
df = DataFrame(np.random.randn(5, 3))
|
| 160 |
+
df["A"] = np.array([1, 2, 3, 4, 5], dtype=object)
|
| 161 |
+
|
| 162 |
+
result = df.diff()
|
| 163 |
+
assert result[0].dtype == np.float64
|
| 164 |
+
|
| 165 |
+
def test_diff_neg_n(self, datetime_frame):
|
| 166 |
+
rs = datetime_frame.diff(-1)
|
| 167 |
+
xp = datetime_frame - datetime_frame.shift(-1)
|
| 168 |
+
tm.assert_frame_equal(rs, xp)
|
| 169 |
+
|
| 170 |
+
def test_diff_float_n(self, datetime_frame):
|
| 171 |
+
rs = datetime_frame.diff(1.0)
|
| 172 |
+
xp = datetime_frame.diff(1)
|
| 173 |
+
tm.assert_frame_equal(rs, xp)
|
| 174 |
+
|
| 175 |
+
def test_diff_axis(self):
|
| 176 |
+
# GH#9727
|
| 177 |
+
df = DataFrame([[1.0, 2.0], [3.0, 4.0]])
|
| 178 |
+
tm.assert_frame_equal(
|
| 179 |
+
df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]])
|
| 180 |
+
)
|
| 181 |
+
tm.assert_frame_equal(
|
| 182 |
+
df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]])
|
| 183 |
+
)
|
| 184 |
+
|
| 185 |
+
def test_diff_period(self):
|
| 186 |
+
# GH#32995 Don't pass an incorrect axis
|
| 187 |
+
pi = date_range("2016-01-01", periods=3).to_period("D")
|
| 188 |
+
df = DataFrame({"A": pi})
|
| 189 |
+
|
| 190 |
+
result = df.diff(1, axis=1)
|
| 191 |
+
|
| 192 |
+
expected = (df - pd.NaT).astype(object)
|
| 193 |
+
tm.assert_frame_equal(result, expected)
|
| 194 |
+
|
| 195 |
+
def test_diff_axis1_mixed_dtypes(self):
|
| 196 |
+
# GH#32995 operate column-wise when we have mixed dtypes and axis=1
|
| 197 |
+
df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)})
|
| 198 |
+
|
| 199 |
+
expected = DataFrame({"A": [np.nan, np.nan, np.nan], "B": df["B"] / 2})
|
| 200 |
+
|
| 201 |
+
result = df.diff(axis=1)
|
| 202 |
+
tm.assert_frame_equal(result, expected)
|
| 203 |
+
|
| 204 |
+
# GH#21437 mixed-float-dtypes
|
| 205 |
+
df = DataFrame(
|
| 206 |
+
{"a": np.arange(3, dtype="float32"), "b": np.arange(3, dtype="float64")}
|
| 207 |
+
)
|
| 208 |
+
result = df.diff(axis=1)
|
| 209 |
+
expected = DataFrame({"a": df["a"] * np.nan, "b": df["b"] * 0})
|
| 210 |
+
tm.assert_frame_equal(result, expected)
|
| 211 |
+
|
| 212 |
+
def test_diff_axis1_mixed_dtypes_large_periods(self):
|
| 213 |
+
# GH#32995 operate column-wise when we have mixed dtypes and axis=1
|
| 214 |
+
df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)})
|
| 215 |
+
|
| 216 |
+
expected = df * np.nan
|
| 217 |
+
|
| 218 |
+
result = df.diff(axis=1, periods=3)
|
| 219 |
+
tm.assert_frame_equal(result, expected)
|
| 220 |
+
|
| 221 |
+
def test_diff_axis1_mixed_dtypes_negative_periods(self):
|
| 222 |
+
# GH#32995 operate column-wise when we have mixed dtypes and axis=1
|
| 223 |
+
df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)})
|
| 224 |
+
|
| 225 |
+
expected = DataFrame({"A": -1.0 * df["A"], "B": df["B"] * np.nan})
|
| 226 |
+
|
| 227 |
+
result = df.diff(axis=1, periods=-1)
|
| 228 |
+
tm.assert_frame_equal(result, expected)
|
| 229 |
+
|
| 230 |
+
def test_diff_sparse(self):
|
| 231 |
+
# GH#28813 .diff() should work for sparse dataframes as well
|
| 232 |
+
sparse_df = DataFrame([[0, 1], [1, 0]], dtype="Sparse[int]")
|
| 233 |
+
|
| 234 |
+
result = sparse_df.diff()
|
| 235 |
+
expected = DataFrame(
|
| 236 |
+
[[np.nan, np.nan], [1.0, -1.0]], dtype=pd.SparseDtype("float", 0.0)
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
tm.assert_frame_equal(result, expected)
|
| 240 |
+
|
| 241 |
+
@pytest.mark.parametrize(
|
| 242 |
+
"axis,expected",
|
| 243 |
+
[
|
| 244 |
+
(
|
| 245 |
+
0,
|
| 246 |
+
DataFrame(
|
| 247 |
+
{
|
| 248 |
+
"a": [np.nan, 0, 1, 0, np.nan, np.nan, np.nan, 0],
|
| 249 |
+
"b": [np.nan, 1, np.nan, np.nan, -2, 1, np.nan, np.nan],
|
| 250 |
+
"c": np.repeat(np.nan, 8),
|
| 251 |
+
"d": [np.nan, 3, 5, 7, 9, 11, 13, 15],
|
| 252 |
+
},
|
| 253 |
+
dtype="Int64",
|
| 254 |
+
),
|
| 255 |
+
),
|
| 256 |
+
(
|
| 257 |
+
1,
|
| 258 |
+
DataFrame(
|
| 259 |
+
{
|
| 260 |
+
"a": np.repeat(np.nan, 8),
|
| 261 |
+
"b": [0, 1, np.nan, 1, np.nan, np.nan, np.nan, 0],
|
| 262 |
+
"c": np.repeat(np.nan, 8),
|
| 263 |
+
"d": np.repeat(np.nan, 8),
|
| 264 |
+
},
|
| 265 |
+
dtype="Int64",
|
| 266 |
+
),
|
| 267 |
+
),
|
| 268 |
+
],
|
| 269 |
+
)
|
| 270 |
+
def test_diff_integer_na(self, axis, expected):
|
| 271 |
+
# GH#24171 IntegerNA Support for DataFrame.diff()
|
| 272 |
+
df = DataFrame(
|
| 273 |
+
{
|
| 274 |
+
"a": np.repeat([0, 1, np.nan, 2], 2),
|
| 275 |
+
"b": np.tile([0, 1, np.nan, 2], 2),
|
| 276 |
+
"c": np.repeat(np.nan, 8),
|
| 277 |
+
"d": np.arange(1, 9) ** 2,
|
| 278 |
+
},
|
| 279 |
+
dtype="Int64",
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
# Test case for default behaviour of diff
|
| 283 |
+
result = df.diff(axis=axis)
|
| 284 |
+
tm.assert_frame_equal(result, expected)
|
| 285 |
+
|
| 286 |
+
def test_diff_readonly(self):
|
| 287 |
+
# https://github.com/pandas-dev/pandas/issues/35559
|
| 288 |
+
arr = np.random.randn(5, 2)
|
| 289 |
+
arr.flags.writeable = False
|
| 290 |
+
df = DataFrame(arr)
|
| 291 |
+
result = df.diff()
|
| 292 |
+
expected = DataFrame(np.array(df)).diff()
|
| 293 |
+
tm.assert_frame_equal(result, expected)
|
| 294 |
+
|
| 295 |
+
def test_diff_all_int_dtype(self, any_int_numpy_dtype):
|
| 296 |
+
# GH 14773
|
| 297 |
+
df = DataFrame(range(5))
|
| 298 |
+
df = df.astype(any_int_numpy_dtype)
|
| 299 |
+
result = df.diff()
|
| 300 |
+
expected_dtype = (
|
| 301 |
+
"float32" if any_int_numpy_dtype in ("int8", "int16") else "float64"
|
| 302 |
+
)
|
| 303 |
+
expected = DataFrame([np.nan, 1.0, 1.0, 1.0, 1.0], dtype=expected_dtype)
|
| 304 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_dot.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
Series,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class DotSharedTests:
|
| 12 |
+
@pytest.fixture
|
| 13 |
+
def obj(self):
|
| 14 |
+
raise NotImplementedError
|
| 15 |
+
|
| 16 |
+
@pytest.fixture
|
| 17 |
+
def other(self) -> DataFrame:
|
| 18 |
+
"""
|
| 19 |
+
other is a DataFrame that is indexed so that obj.dot(other) is valid
|
| 20 |
+
"""
|
| 21 |
+
raise NotImplementedError
|
| 22 |
+
|
| 23 |
+
@pytest.fixture
|
| 24 |
+
def expected(self, obj, other) -> DataFrame:
|
| 25 |
+
"""
|
| 26 |
+
The expected result of obj.dot(other)
|
| 27 |
+
"""
|
| 28 |
+
raise NotImplementedError
|
| 29 |
+
|
| 30 |
+
@classmethod
|
| 31 |
+
def reduced_dim_assert(cls, result, expected):
|
| 32 |
+
"""
|
| 33 |
+
Assertion about results with 1 fewer dimension that self.obj
|
| 34 |
+
"""
|
| 35 |
+
raise NotImplementedError
|
| 36 |
+
|
| 37 |
+
def test_dot_equiv_values_dot(self, obj, other, expected):
|
| 38 |
+
# `expected` is constructed from obj.values.dot(other.values)
|
| 39 |
+
result = obj.dot(other)
|
| 40 |
+
tm.assert_equal(result, expected)
|
| 41 |
+
|
| 42 |
+
def test_dot_2d_ndarray(self, obj, other, expected):
|
| 43 |
+
# Check ndarray argument; in this case we get matching values,
|
| 44 |
+
# but index/columns may not match
|
| 45 |
+
result = obj.dot(other.values)
|
| 46 |
+
assert np.all(result == expected.values)
|
| 47 |
+
|
| 48 |
+
def test_dot_1d_ndarray(self, obj, expected):
|
| 49 |
+
# can pass correct-length array
|
| 50 |
+
row = obj.iloc[0] if obj.ndim == 2 else obj
|
| 51 |
+
|
| 52 |
+
result = obj.dot(row.values)
|
| 53 |
+
expected = obj.dot(row)
|
| 54 |
+
self.reduced_dim_assert(result, expected)
|
| 55 |
+
|
| 56 |
+
def test_dot_series(self, obj, other, expected):
|
| 57 |
+
# Check series argument
|
| 58 |
+
result = obj.dot(other["1"])
|
| 59 |
+
self.reduced_dim_assert(result, expected["1"])
|
| 60 |
+
|
| 61 |
+
def test_dot_series_alignment(self, obj, other, expected):
|
| 62 |
+
result = obj.dot(other.iloc[::-1]["1"])
|
| 63 |
+
self.reduced_dim_assert(result, expected["1"])
|
| 64 |
+
|
| 65 |
+
def test_dot_aligns(self, obj, other, expected):
|
| 66 |
+
# Check index alignment
|
| 67 |
+
other2 = other.iloc[::-1]
|
| 68 |
+
result = obj.dot(other2)
|
| 69 |
+
tm.assert_equal(result, expected)
|
| 70 |
+
|
| 71 |
+
def test_dot_shape_mismatch(self, obj):
|
| 72 |
+
msg = "Dot product shape mismatch"
|
| 73 |
+
# exception raised is of type Exception
|
| 74 |
+
with pytest.raises(Exception, match=msg):
|
| 75 |
+
obj.dot(obj.values[:3])
|
| 76 |
+
|
| 77 |
+
def test_dot_misaligned(self, obj, other):
|
| 78 |
+
msg = "matrices are not aligned"
|
| 79 |
+
with pytest.raises(ValueError, match=msg):
|
| 80 |
+
obj.dot(other.T)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class TestSeriesDot(DotSharedTests):
|
| 84 |
+
@pytest.fixture
|
| 85 |
+
def obj(self):
|
| 86 |
+
return Series(np.random.randn(4), index=["p", "q", "r", "s"])
|
| 87 |
+
|
| 88 |
+
@pytest.fixture
|
| 89 |
+
def other(self):
|
| 90 |
+
return DataFrame(
|
| 91 |
+
np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
|
| 92 |
+
).T
|
| 93 |
+
|
| 94 |
+
@pytest.fixture
|
| 95 |
+
def expected(self, obj, other):
|
| 96 |
+
return Series(np.dot(obj.values, other.values), index=other.columns)
|
| 97 |
+
|
| 98 |
+
@classmethod
|
| 99 |
+
def reduced_dim_assert(cls, result, expected):
|
| 100 |
+
"""
|
| 101 |
+
Assertion about results with 1 fewer dimension that self.obj
|
| 102 |
+
"""
|
| 103 |
+
tm.assert_almost_equal(result, expected)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class TestDataFrameDot(DotSharedTests):
|
| 107 |
+
@pytest.fixture
|
| 108 |
+
def obj(self):
|
| 109 |
+
return DataFrame(
|
| 110 |
+
np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
@pytest.fixture
|
| 114 |
+
def other(self):
|
| 115 |
+
return DataFrame(
|
| 116 |
+
np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["1", "2"]
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
@pytest.fixture
|
| 120 |
+
def expected(self, obj, other):
|
| 121 |
+
return DataFrame(
|
| 122 |
+
np.dot(obj.values, other.values), index=obj.index, columns=other.columns
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
@classmethod
|
| 126 |
+
def reduced_dim_assert(cls, result, expected):
|
| 127 |
+
"""
|
| 128 |
+
Assertion about results with 1 fewer dimension that self.obj
|
| 129 |
+
"""
|
| 130 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
| 131 |
+
assert result.name is None
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_drop.py
ADDED
|
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas.errors import PerformanceWarning
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
DatetimeIndex,
|
| 12 |
+
Index,
|
| 13 |
+
MultiIndex,
|
| 14 |
+
Series,
|
| 15 |
+
Timestamp,
|
| 16 |
+
)
|
| 17 |
+
import pandas._testing as tm
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@pytest.mark.parametrize(
|
| 21 |
+
"msg,labels,level",
|
| 22 |
+
[
|
| 23 |
+
(r"labels \[4\] not found in level", 4, "a"),
|
| 24 |
+
(r"labels \[7\] not found in level", 7, "b"),
|
| 25 |
+
],
|
| 26 |
+
)
|
| 27 |
+
def test_drop_raise_exception_if_labels_not_in_level(msg, labels, level):
|
| 28 |
+
# GH 8594
|
| 29 |
+
mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
|
| 30 |
+
s = Series([10, 20, 30], index=mi)
|
| 31 |
+
df = DataFrame([10, 20, 30], index=mi)
|
| 32 |
+
|
| 33 |
+
with pytest.raises(KeyError, match=msg):
|
| 34 |
+
s.drop(labels, level=level)
|
| 35 |
+
with pytest.raises(KeyError, match=msg):
|
| 36 |
+
df.drop(labels, level=level)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@pytest.mark.parametrize("labels,level", [(4, "a"), (7, "b")])
|
| 40 |
+
def test_drop_errors_ignore(labels, level):
|
| 41 |
+
# GH 8594
|
| 42 |
+
mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
|
| 43 |
+
s = Series([10, 20, 30], index=mi)
|
| 44 |
+
df = DataFrame([10, 20, 30], index=mi)
|
| 45 |
+
|
| 46 |
+
expected_s = s.drop(labels, level=level, errors="ignore")
|
| 47 |
+
tm.assert_series_equal(s, expected_s)
|
| 48 |
+
|
| 49 |
+
expected_df = df.drop(labels, level=level, errors="ignore")
|
| 50 |
+
tm.assert_frame_equal(df, expected_df)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def test_drop_with_non_unique_datetime_index_and_invalid_keys():
|
| 54 |
+
# GH 30399
|
| 55 |
+
|
| 56 |
+
# define dataframe with unique datetime index
|
| 57 |
+
df = DataFrame(
|
| 58 |
+
np.random.randn(5, 3),
|
| 59 |
+
columns=["a", "b", "c"],
|
| 60 |
+
index=pd.date_range("2012", freq="H", periods=5),
|
| 61 |
+
)
|
| 62 |
+
# create dataframe with non-unique datetime index
|
| 63 |
+
df = df.iloc[[0, 2, 2, 3]].copy()
|
| 64 |
+
|
| 65 |
+
with pytest.raises(KeyError, match="not found in axis"):
|
| 66 |
+
df.drop(["a", "b"]) # Dropping with labels not exist in the index
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class TestDataFrameDrop:
|
| 70 |
+
def test_drop_names(self):
|
| 71 |
+
df = DataFrame(
|
| 72 |
+
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
|
| 73 |
+
index=["a", "b", "c"],
|
| 74 |
+
columns=["d", "e", "f"],
|
| 75 |
+
)
|
| 76 |
+
df.index.name, df.columns.name = "first", "second"
|
| 77 |
+
df_dropped_b = df.drop("b")
|
| 78 |
+
df_dropped_e = df.drop("e", axis=1)
|
| 79 |
+
df_inplace_b, df_inplace_e = df.copy(), df.copy()
|
| 80 |
+
return_value = df_inplace_b.drop("b", inplace=True)
|
| 81 |
+
assert return_value is None
|
| 82 |
+
return_value = df_inplace_e.drop("e", axis=1, inplace=True)
|
| 83 |
+
assert return_value is None
|
| 84 |
+
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
|
| 85 |
+
assert obj.index.name == "first"
|
| 86 |
+
assert obj.columns.name == "second"
|
| 87 |
+
assert list(df.columns) == ["d", "e", "f"]
|
| 88 |
+
|
| 89 |
+
msg = r"\['g'\] not found in axis"
|
| 90 |
+
with pytest.raises(KeyError, match=msg):
|
| 91 |
+
df.drop(["g"])
|
| 92 |
+
with pytest.raises(KeyError, match=msg):
|
| 93 |
+
df.drop(["g"], axis=1)
|
| 94 |
+
|
| 95 |
+
# errors = 'ignore'
|
| 96 |
+
dropped = df.drop(["g"], errors="ignore")
|
| 97 |
+
expected = Index(["a", "b", "c"], name="first")
|
| 98 |
+
tm.assert_index_equal(dropped.index, expected)
|
| 99 |
+
|
| 100 |
+
dropped = df.drop(["b", "g"], errors="ignore")
|
| 101 |
+
expected = Index(["a", "c"], name="first")
|
| 102 |
+
tm.assert_index_equal(dropped.index, expected)
|
| 103 |
+
|
| 104 |
+
dropped = df.drop(["g"], axis=1, errors="ignore")
|
| 105 |
+
expected = Index(["d", "e", "f"], name="second")
|
| 106 |
+
tm.assert_index_equal(dropped.columns, expected)
|
| 107 |
+
|
| 108 |
+
dropped = df.drop(["d", "g"], axis=1, errors="ignore")
|
| 109 |
+
expected = Index(["e", "f"], name="second")
|
| 110 |
+
tm.assert_index_equal(dropped.columns, expected)
|
| 111 |
+
|
| 112 |
+
# GH 16398
|
| 113 |
+
dropped = df.drop([], errors="ignore")
|
| 114 |
+
expected = Index(["a", "b", "c"], name="first")
|
| 115 |
+
tm.assert_index_equal(dropped.index, expected)
|
| 116 |
+
|
| 117 |
+
def test_drop(self):
|
| 118 |
+
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
|
| 119 |
+
tm.assert_frame_equal(simple.drop("A", axis=1), simple[["B"]])
|
| 120 |
+
tm.assert_frame_equal(simple.drop(["A", "B"], axis="columns"), simple[[]])
|
| 121 |
+
tm.assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
|
| 122 |
+
tm.assert_frame_equal(simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
|
| 123 |
+
|
| 124 |
+
with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
|
| 125 |
+
simple.drop(5)
|
| 126 |
+
with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
|
| 127 |
+
simple.drop("C", axis=1)
|
| 128 |
+
with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
|
| 129 |
+
simple.drop([1, 5])
|
| 130 |
+
with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
|
| 131 |
+
simple.drop(["A", "C"], axis=1)
|
| 132 |
+
|
| 133 |
+
# GH 42881
|
| 134 |
+
with pytest.raises(KeyError, match=r"\['C', 'D', 'F'\] not found in axis"):
|
| 135 |
+
simple.drop(["C", "D", "F"], axis=1)
|
| 136 |
+
|
| 137 |
+
# errors = 'ignore'
|
| 138 |
+
tm.assert_frame_equal(simple.drop(5, errors="ignore"), simple)
|
| 139 |
+
tm.assert_frame_equal(
|
| 140 |
+
simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :]
|
| 141 |
+
)
|
| 142 |
+
tm.assert_frame_equal(simple.drop("C", axis=1, errors="ignore"), simple)
|
| 143 |
+
tm.assert_frame_equal(
|
| 144 |
+
simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]]
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
# non-unique - wheee!
|
| 148 |
+
nu_df = DataFrame(
|
| 149 |
+
list(zip(range(3), range(-3, 1), list("abc"))), columns=["a", "a", "b"]
|
| 150 |
+
)
|
| 151 |
+
tm.assert_frame_equal(nu_df.drop("a", axis=1), nu_df[["b"]])
|
| 152 |
+
tm.assert_frame_equal(nu_df.drop("b", axis="columns"), nu_df["a"])
|
| 153 |
+
tm.assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398
|
| 154 |
+
|
| 155 |
+
nu_df = nu_df.set_index(Index(["X", "Y", "X"]))
|
| 156 |
+
nu_df.columns = list("abc")
|
| 157 |
+
tm.assert_frame_equal(nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
|
| 158 |
+
tm.assert_frame_equal(nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
|
| 159 |
+
|
| 160 |
+
# inplace cache issue
|
| 161 |
+
# GH#5628
|
| 162 |
+
df = DataFrame(np.random.randn(10, 3), columns=list("abc"))
|
| 163 |
+
expected = df[~(df.b > 0)]
|
| 164 |
+
return_value = df.drop(labels=df[df.b > 0].index, inplace=True)
|
| 165 |
+
assert return_value is None
|
| 166 |
+
tm.assert_frame_equal(df, expected)
|
| 167 |
+
|
| 168 |
+
def test_drop_multiindex_not_lexsorted(self):
|
| 169 |
+
# GH#11640
|
| 170 |
+
|
| 171 |
+
# define the lexsorted version
|
| 172 |
+
lexsorted_mi = MultiIndex.from_tuples(
|
| 173 |
+
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
|
| 174 |
+
)
|
| 175 |
+
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
|
| 176 |
+
assert lexsorted_df.columns._is_lexsorted()
|
| 177 |
+
|
| 178 |
+
# define the non-lexsorted version
|
| 179 |
+
not_lexsorted_df = DataFrame(
|
| 180 |
+
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
|
| 181 |
+
)
|
| 182 |
+
not_lexsorted_df = not_lexsorted_df.pivot_table(
|
| 183 |
+
index="a", columns=["b", "c"], values="d"
|
| 184 |
+
)
|
| 185 |
+
not_lexsorted_df = not_lexsorted_df.reset_index()
|
| 186 |
+
assert not not_lexsorted_df.columns._is_lexsorted()
|
| 187 |
+
|
| 188 |
+
# compare the results
|
| 189 |
+
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
|
| 190 |
+
|
| 191 |
+
expected = lexsorted_df.drop("a", axis=1)
|
| 192 |
+
with tm.assert_produces_warning(PerformanceWarning):
|
| 193 |
+
result = not_lexsorted_df.drop("a", axis=1)
|
| 194 |
+
|
| 195 |
+
tm.assert_frame_equal(result, expected)
|
| 196 |
+
|
| 197 |
+
def test_drop_api_equivalence(self):
|
| 198 |
+
# equivalence of the labels/axis and index/columns API's (GH#12392)
|
| 199 |
+
df = DataFrame(
|
| 200 |
+
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
|
| 201 |
+
index=["a", "b", "c"],
|
| 202 |
+
columns=["d", "e", "f"],
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
res1 = df.drop("a")
|
| 206 |
+
res2 = df.drop(index="a")
|
| 207 |
+
tm.assert_frame_equal(res1, res2)
|
| 208 |
+
|
| 209 |
+
res1 = df.drop("d", axis=1)
|
| 210 |
+
res2 = df.drop(columns="d")
|
| 211 |
+
tm.assert_frame_equal(res1, res2)
|
| 212 |
+
|
| 213 |
+
res1 = df.drop(labels="e", axis=1)
|
| 214 |
+
res2 = df.drop(columns="e")
|
| 215 |
+
tm.assert_frame_equal(res1, res2)
|
| 216 |
+
|
| 217 |
+
res1 = df.drop(["a"], axis=0)
|
| 218 |
+
res2 = df.drop(index=["a"])
|
| 219 |
+
tm.assert_frame_equal(res1, res2)
|
| 220 |
+
|
| 221 |
+
res1 = df.drop(["a"], axis=0).drop(["d"], axis=1)
|
| 222 |
+
res2 = df.drop(index=["a"], columns=["d"])
|
| 223 |
+
tm.assert_frame_equal(res1, res2)
|
| 224 |
+
|
| 225 |
+
msg = "Cannot specify both 'labels' and 'index'/'columns'"
|
| 226 |
+
with pytest.raises(ValueError, match=msg):
|
| 227 |
+
df.drop(labels="a", index="b")
|
| 228 |
+
|
| 229 |
+
with pytest.raises(ValueError, match=msg):
|
| 230 |
+
df.drop(labels="a", columns="b")
|
| 231 |
+
|
| 232 |
+
msg = "Need to specify at least one of 'labels', 'index' or 'columns'"
|
| 233 |
+
with pytest.raises(ValueError, match=msg):
|
| 234 |
+
df.drop(axis=1)
|
| 235 |
+
|
| 236 |
+
data = [[1, 2, 3], [1, 2, 3]]
|
| 237 |
+
|
| 238 |
+
@pytest.mark.parametrize(
|
| 239 |
+
"actual",
|
| 240 |
+
[
|
| 241 |
+
DataFrame(data=data, index=["a", "a"]),
|
| 242 |
+
DataFrame(data=data, index=["a", "b"]),
|
| 243 |
+
DataFrame(data=data, index=["a", "b"]).set_index([0, 1]),
|
| 244 |
+
DataFrame(data=data, index=["a", "a"]).set_index([0, 1]),
|
| 245 |
+
],
|
| 246 |
+
)
|
| 247 |
+
def test_raise_on_drop_duplicate_index(self, actual):
|
| 248 |
+
# GH#19186
|
| 249 |
+
level = 0 if isinstance(actual.index, MultiIndex) else None
|
| 250 |
+
msg = re.escape("\"['c'] not found in axis\"")
|
| 251 |
+
with pytest.raises(KeyError, match=msg):
|
| 252 |
+
actual.drop("c", level=level, axis=0)
|
| 253 |
+
with pytest.raises(KeyError, match=msg):
|
| 254 |
+
actual.T.drop("c", level=level, axis=1)
|
| 255 |
+
expected_no_err = actual.drop("c", axis=0, level=level, errors="ignore")
|
| 256 |
+
tm.assert_frame_equal(expected_no_err, actual)
|
| 257 |
+
expected_no_err = actual.T.drop("c", axis=1, level=level, errors="ignore")
|
| 258 |
+
tm.assert_frame_equal(expected_no_err.T, actual)
|
| 259 |
+
|
| 260 |
+
@pytest.mark.parametrize("index", [[1, 2, 3], [1, 1, 2]])
|
| 261 |
+
@pytest.mark.parametrize("drop_labels", [[], [1], [2]])
|
| 262 |
+
def test_drop_empty_list(self, index, drop_labels):
|
| 263 |
+
# GH#21494
|
| 264 |
+
expected_index = [i for i in index if i not in drop_labels]
|
| 265 |
+
frame = DataFrame(index=index).drop(drop_labels)
|
| 266 |
+
tm.assert_frame_equal(frame, DataFrame(index=expected_index))
|
| 267 |
+
|
| 268 |
+
@pytest.mark.parametrize("index", [[1, 2, 3], [1, 2, 2]])
|
| 269 |
+
@pytest.mark.parametrize("drop_labels", [[1, 4], [4, 5]])
|
| 270 |
+
def test_drop_non_empty_list(self, index, drop_labels):
|
| 271 |
+
# GH# 21494
|
| 272 |
+
with pytest.raises(KeyError, match="not found in axis"):
|
| 273 |
+
DataFrame(index=index).drop(drop_labels)
|
| 274 |
+
|
| 275 |
+
@pytest.mark.parametrize(
|
| 276 |
+
"empty_listlike",
|
| 277 |
+
[
|
| 278 |
+
[],
|
| 279 |
+
{},
|
| 280 |
+
np.array([]),
|
| 281 |
+
Series([], dtype="datetime64[ns]"),
|
| 282 |
+
Index([]),
|
| 283 |
+
DatetimeIndex([]),
|
| 284 |
+
],
|
| 285 |
+
)
|
| 286 |
+
def test_drop_empty_listlike_non_unique_datetime_index(self, empty_listlike):
|
| 287 |
+
# GH#27994
|
| 288 |
+
data = {"column_a": [5, 10], "column_b": ["one", "two"]}
|
| 289 |
+
index = [Timestamp("2021-01-01"), Timestamp("2021-01-01")]
|
| 290 |
+
df = DataFrame(data, index=index)
|
| 291 |
+
|
| 292 |
+
# Passing empty list-like should return the same DataFrame.
|
| 293 |
+
expected = df.copy()
|
| 294 |
+
result = df.drop(empty_listlike)
|
| 295 |
+
tm.assert_frame_equal(result, expected)
|
| 296 |
+
|
| 297 |
+
def test_mixed_depth_drop(self):
|
| 298 |
+
arrays = [
|
| 299 |
+
["a", "top", "top", "routine1", "routine1", "routine2"],
|
| 300 |
+
["", "OD", "OD", "result1", "result2", "result1"],
|
| 301 |
+
["", "wx", "wy", "", "", ""],
|
| 302 |
+
]
|
| 303 |
+
|
| 304 |
+
tuples = sorted(zip(*arrays))
|
| 305 |
+
index = MultiIndex.from_tuples(tuples)
|
| 306 |
+
df = DataFrame(np.random.randn(4, 6), columns=index)
|
| 307 |
+
|
| 308 |
+
result = df.drop("a", axis=1)
|
| 309 |
+
expected = df.drop([("a", "", "")], axis=1)
|
| 310 |
+
tm.assert_frame_equal(expected, result)
|
| 311 |
+
|
| 312 |
+
result = df.drop(["top"], axis=1)
|
| 313 |
+
expected = df.drop([("top", "OD", "wx")], axis=1)
|
| 314 |
+
expected = expected.drop([("top", "OD", "wy")], axis=1)
|
| 315 |
+
tm.assert_frame_equal(expected, result)
|
| 316 |
+
|
| 317 |
+
result = df.drop(("top", "OD", "wx"), axis=1)
|
| 318 |
+
expected = df.drop([("top", "OD", "wx")], axis=1)
|
| 319 |
+
tm.assert_frame_equal(expected, result)
|
| 320 |
+
|
| 321 |
+
expected = df.drop([("top", "OD", "wy")], axis=1)
|
| 322 |
+
expected = df.drop("top", axis=1)
|
| 323 |
+
|
| 324 |
+
result = df.drop("result1", level=1, axis=1)
|
| 325 |
+
expected = df.drop(
|
| 326 |
+
[("routine1", "result1", ""), ("routine2", "result1", "")], axis=1
|
| 327 |
+
)
|
| 328 |
+
tm.assert_frame_equal(expected, result)
|
| 329 |
+
|
| 330 |
+
def test_drop_multiindex_other_level_nan(self):
|
| 331 |
+
# GH#12754
|
| 332 |
+
df = (
|
| 333 |
+
DataFrame(
|
| 334 |
+
{
|
| 335 |
+
"A": ["one", "one", "two", "two"],
|
| 336 |
+
"B": [np.nan, 0.0, 1.0, 2.0],
|
| 337 |
+
"C": ["a", "b", "c", "c"],
|
| 338 |
+
"D": [1, 2, 3, 4],
|
| 339 |
+
}
|
| 340 |
+
)
|
| 341 |
+
.set_index(["A", "B", "C"])
|
| 342 |
+
.sort_index()
|
| 343 |
+
)
|
| 344 |
+
result = df.drop("c", level="C")
|
| 345 |
+
expected = DataFrame(
|
| 346 |
+
[2, 1],
|
| 347 |
+
columns=["D"],
|
| 348 |
+
index=MultiIndex.from_tuples(
|
| 349 |
+
[("one", 0.0, "b"), ("one", np.nan, "a")], names=["A", "B", "C"]
|
| 350 |
+
),
|
| 351 |
+
)
|
| 352 |
+
tm.assert_frame_equal(result, expected)
|
| 353 |
+
|
| 354 |
+
def test_drop_nonunique(self):
|
| 355 |
+
df = DataFrame(
|
| 356 |
+
[
|
| 357 |
+
["x-a", "x", "a", 1.5],
|
| 358 |
+
["x-a", "x", "a", 1.2],
|
| 359 |
+
["z-c", "z", "c", 3.1],
|
| 360 |
+
["x-a", "x", "a", 4.1],
|
| 361 |
+
["x-b", "x", "b", 5.1],
|
| 362 |
+
["x-b", "x", "b", 4.1],
|
| 363 |
+
["x-b", "x", "b", 2.2],
|
| 364 |
+
["y-a", "y", "a", 1.2],
|
| 365 |
+
["z-b", "z", "b", 2.1],
|
| 366 |
+
],
|
| 367 |
+
columns=["var1", "var2", "var3", "var4"],
|
| 368 |
+
)
|
| 369 |
+
|
| 370 |
+
grp_size = df.groupby("var1").size()
|
| 371 |
+
drop_idx = grp_size.loc[grp_size == 1]
|
| 372 |
+
|
| 373 |
+
idf = df.set_index(["var1", "var2", "var3"])
|
| 374 |
+
|
| 375 |
+
# it works! GH#2101
|
| 376 |
+
result = idf.drop(drop_idx.index, level=0).reset_index()
|
| 377 |
+
expected = df[-df.var1.isin(drop_idx.index)]
|
| 378 |
+
|
| 379 |
+
result.index = expected.index
|
| 380 |
+
|
| 381 |
+
tm.assert_frame_equal(result, expected)
|
| 382 |
+
|
| 383 |
+
def test_drop_level(self, multiindex_dataframe_random_data):
|
| 384 |
+
frame = multiindex_dataframe_random_data
|
| 385 |
+
|
| 386 |
+
result = frame.drop(["bar", "qux"], level="first")
|
| 387 |
+
expected = frame.iloc[[0, 1, 2, 5, 6]]
|
| 388 |
+
tm.assert_frame_equal(result, expected)
|
| 389 |
+
|
| 390 |
+
result = frame.drop(["two"], level="second")
|
| 391 |
+
expected = frame.iloc[[0, 2, 3, 6, 7, 9]]
|
| 392 |
+
tm.assert_frame_equal(result, expected)
|
| 393 |
+
|
| 394 |
+
result = frame.T.drop(["bar", "qux"], axis=1, level="first")
|
| 395 |
+
expected = frame.iloc[[0, 1, 2, 5, 6]].T
|
| 396 |
+
tm.assert_frame_equal(result, expected)
|
| 397 |
+
|
| 398 |
+
result = frame.T.drop(["two"], axis=1, level="second")
|
| 399 |
+
expected = frame.iloc[[0, 2, 3, 6, 7, 9]].T
|
| 400 |
+
tm.assert_frame_equal(result, expected)
|
| 401 |
+
|
| 402 |
+
def test_drop_level_nonunique_datetime(self):
|
| 403 |
+
# GH#12701
|
| 404 |
+
idx = Index([2, 3, 4, 4, 5], name="id")
|
| 405 |
+
idxdt = pd.to_datetime(
|
| 406 |
+
[
|
| 407 |
+
"2016-03-23 14:00",
|
| 408 |
+
"2016-03-23 15:00",
|
| 409 |
+
"2016-03-23 16:00",
|
| 410 |
+
"2016-03-23 16:00",
|
| 411 |
+
"2016-03-23 17:00",
|
| 412 |
+
]
|
| 413 |
+
)
|
| 414 |
+
df = DataFrame(np.arange(10).reshape(5, 2), columns=list("ab"), index=idx)
|
| 415 |
+
df["tstamp"] = idxdt
|
| 416 |
+
df = df.set_index("tstamp", append=True)
|
| 417 |
+
ts = Timestamp("201603231600")
|
| 418 |
+
assert df.index.is_unique is False
|
| 419 |
+
|
| 420 |
+
result = df.drop(ts, level="tstamp")
|
| 421 |
+
expected = df.loc[idx != 4]
|
| 422 |
+
tm.assert_frame_equal(result, expected)
|
| 423 |
+
|
| 424 |
+
def test_drop_tz_aware_timestamp_across_dst(self, frame_or_series):
|
| 425 |
+
# GH#21761
|
| 426 |
+
start = Timestamp("2017-10-29", tz="Europe/Berlin")
|
| 427 |
+
end = Timestamp("2017-10-29 04:00:00", tz="Europe/Berlin")
|
| 428 |
+
index = pd.date_range(start, end, freq="15min")
|
| 429 |
+
data = frame_or_series(data=[1] * len(index), index=index)
|
| 430 |
+
result = data.drop(start)
|
| 431 |
+
expected_start = Timestamp("2017-10-29 00:15:00", tz="Europe/Berlin")
|
| 432 |
+
expected_idx = pd.date_range(expected_start, end, freq="15min")
|
| 433 |
+
expected = frame_or_series(data=[1] * len(expected_idx), index=expected_idx)
|
| 434 |
+
tm.assert_equal(result, expected)
|
| 435 |
+
|
| 436 |
+
def test_drop_preserve_names(self):
|
| 437 |
+
index = MultiIndex.from_arrays(
|
| 438 |
+
[[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]], names=["one", "two"]
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
df = DataFrame(np.random.randn(6, 3), index=index)
|
| 442 |
+
|
| 443 |
+
result = df.drop([(0, 2)])
|
| 444 |
+
assert result.index.names == ("one", "two")
|
| 445 |
+
|
| 446 |
+
@pytest.mark.parametrize(
|
| 447 |
+
"operation", ["__iadd__", "__isub__", "__imul__", "__ipow__"]
|
| 448 |
+
)
|
| 449 |
+
@pytest.mark.parametrize("inplace", [False, True])
|
| 450 |
+
def test_inplace_drop_and_operation(self, operation, inplace):
|
| 451 |
+
# GH#30484
|
| 452 |
+
df = DataFrame({"x": range(5)})
|
| 453 |
+
expected = df.copy()
|
| 454 |
+
df["y"] = range(5)
|
| 455 |
+
y = df["y"]
|
| 456 |
+
|
| 457 |
+
with tm.assert_produces_warning(None):
|
| 458 |
+
if inplace:
|
| 459 |
+
df.drop("y", axis=1, inplace=inplace)
|
| 460 |
+
else:
|
| 461 |
+
df = df.drop("y", axis=1, inplace=inplace)
|
| 462 |
+
|
| 463 |
+
# Perform operation and check result
|
| 464 |
+
getattr(y, operation)(1)
|
| 465 |
+
tm.assert_frame_equal(df, expected)
|
| 466 |
+
|
| 467 |
+
def test_drop_with_non_unique_multiindex(self):
|
| 468 |
+
# GH#36293
|
| 469 |
+
mi = MultiIndex.from_arrays([["x", "y", "x"], ["i", "j", "i"]])
|
| 470 |
+
df = DataFrame([1, 2, 3], index=mi)
|
| 471 |
+
result = df.drop(index="x")
|
| 472 |
+
expected = DataFrame([2], index=MultiIndex.from_arrays([["y"], ["j"]]))
|
| 473 |
+
tm.assert_frame_equal(result, expected)
|
| 474 |
+
|
| 475 |
+
@pytest.mark.parametrize("indexer", [("a", "a"), [("a", "a")]])
|
| 476 |
+
def test_drop_tuple_with_non_unique_multiindex(self, indexer):
|
| 477 |
+
# GH#42771
|
| 478 |
+
idx = MultiIndex.from_product([["a", "b"], ["a", "a"]])
|
| 479 |
+
df = DataFrame({"x": range(len(idx))}, index=idx)
|
| 480 |
+
result = df.drop(index=[("a", "a")])
|
| 481 |
+
expected = DataFrame(
|
| 482 |
+
{"x": [2, 3]}, index=MultiIndex.from_tuples([("b", "a"), ("b", "a")])
|
| 483 |
+
)
|
| 484 |
+
tm.assert_frame_equal(result, expected)
|
| 485 |
+
|
| 486 |
+
def test_drop_with_duplicate_columns(self):
|
| 487 |
+
df = DataFrame(
|
| 488 |
+
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
|
| 489 |
+
)
|
| 490 |
+
result = df.drop(["a"], axis=1)
|
| 491 |
+
expected = DataFrame([[1], [1], [1]], columns=["bar"])
|
| 492 |
+
tm.assert_frame_equal(result, expected)
|
| 493 |
+
result = df.drop("a", axis=1)
|
| 494 |
+
tm.assert_frame_equal(result, expected)
|
| 495 |
+
|
| 496 |
+
def test_drop_with_duplicate_columns2(self):
|
| 497 |
+
# drop buggy GH#6240
|
| 498 |
+
df = DataFrame(
|
| 499 |
+
{
|
| 500 |
+
"A": np.random.randn(5),
|
| 501 |
+
"B": np.random.randn(5),
|
| 502 |
+
"C": np.random.randn(5),
|
| 503 |
+
"D": ["a", "b", "c", "d", "e"],
|
| 504 |
+
}
|
| 505 |
+
)
|
| 506 |
+
|
| 507 |
+
expected = df.take([0, 1, 1], axis=1)
|
| 508 |
+
df2 = df.take([2, 0, 1, 2, 1], axis=1)
|
| 509 |
+
result = df2.drop("C", axis=1)
|
| 510 |
+
tm.assert_frame_equal(result, expected)
|
| 511 |
+
|
| 512 |
+
def test_drop_inplace_no_leftover_column_reference(self):
|
| 513 |
+
# GH 13934
|
| 514 |
+
df = DataFrame({"a": [1, 2, 3]})
|
| 515 |
+
a = df.a
|
| 516 |
+
df.drop(["a"], axis=1, inplace=True)
|
| 517 |
+
tm.assert_index_equal(df.columns, Index([], dtype="object"))
|
| 518 |
+
a -= a.mean()
|
| 519 |
+
tm.assert_index_equal(df.columns, Index([], dtype="object"))
|
| 520 |
+
|
| 521 |
+
def test_drop_level_missing_label_multiindex(self):
|
| 522 |
+
# GH 18561
|
| 523 |
+
df = DataFrame(index=MultiIndex.from_product([range(3), range(3)]))
|
| 524 |
+
with pytest.raises(KeyError, match="labels \\[5\\] not found in level"):
|
| 525 |
+
df.drop(5, level=0)
|
| 526 |
+
|
| 527 |
+
@pytest.mark.parametrize("idx, level", [(["a", "b"], 0), (["a"], None)])
|
| 528 |
+
def test_drop_index_ea_dtype(self, any_numeric_ea_dtype, idx, level):
|
| 529 |
+
# GH#45860
|
| 530 |
+
df = DataFrame(
|
| 531 |
+
{"a": [1, 2, 2, pd.NA], "b": 100}, dtype=any_numeric_ea_dtype
|
| 532 |
+
).set_index(idx)
|
| 533 |
+
result = df.drop(Index([2, pd.NA]), level=level)
|
| 534 |
+
expected = DataFrame(
|
| 535 |
+
{"a": [1], "b": 100}, dtype=any_numeric_ea_dtype
|
| 536 |
+
).set_index(idx)
|
| 537 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_drop_duplicates.py
ADDED
|
@@ -0,0 +1,473 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from pandas import (
|
| 8 |
+
DataFrame,
|
| 9 |
+
NaT,
|
| 10 |
+
concat,
|
| 11 |
+
)
|
| 12 |
+
import pandas._testing as tm
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
|
| 16 |
+
def test_drop_duplicates_with_misspelled_column_name(subset):
|
| 17 |
+
# GH 19730
|
| 18 |
+
df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
|
| 19 |
+
msg = re.escape("Index(['a'], dtype='object')")
|
| 20 |
+
|
| 21 |
+
with pytest.raises(KeyError, match=msg):
|
| 22 |
+
df.drop_duplicates(subset)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def test_drop_duplicates():
|
| 26 |
+
df = DataFrame(
|
| 27 |
+
{
|
| 28 |
+
"AAA": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
|
| 29 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 30 |
+
"C": [1, 1, 2, 2, 2, 2, 1, 2],
|
| 31 |
+
"D": range(8),
|
| 32 |
+
}
|
| 33 |
+
)
|
| 34 |
+
# single column
|
| 35 |
+
result = df.drop_duplicates("AAA")
|
| 36 |
+
expected = df[:2]
|
| 37 |
+
tm.assert_frame_equal(result, expected)
|
| 38 |
+
|
| 39 |
+
result = df.drop_duplicates("AAA", keep="last")
|
| 40 |
+
expected = df.loc[[6, 7]]
|
| 41 |
+
tm.assert_frame_equal(result, expected)
|
| 42 |
+
|
| 43 |
+
result = df.drop_duplicates("AAA", keep=False)
|
| 44 |
+
expected = df.loc[[]]
|
| 45 |
+
tm.assert_frame_equal(result, expected)
|
| 46 |
+
assert len(result) == 0
|
| 47 |
+
|
| 48 |
+
# multi column
|
| 49 |
+
expected = df.loc[[0, 1, 2, 3]]
|
| 50 |
+
result = df.drop_duplicates(np.array(["AAA", "B"]))
|
| 51 |
+
tm.assert_frame_equal(result, expected)
|
| 52 |
+
result = df.drop_duplicates(["AAA", "B"])
|
| 53 |
+
tm.assert_frame_equal(result, expected)
|
| 54 |
+
|
| 55 |
+
result = df.drop_duplicates(("AAA", "B"), keep="last")
|
| 56 |
+
expected = df.loc[[0, 5, 6, 7]]
|
| 57 |
+
tm.assert_frame_equal(result, expected)
|
| 58 |
+
|
| 59 |
+
result = df.drop_duplicates(("AAA", "B"), keep=False)
|
| 60 |
+
expected = df.loc[[0]]
|
| 61 |
+
tm.assert_frame_equal(result, expected)
|
| 62 |
+
|
| 63 |
+
# consider everything
|
| 64 |
+
df2 = df.loc[:, ["AAA", "B", "C"]]
|
| 65 |
+
|
| 66 |
+
result = df2.drop_duplicates()
|
| 67 |
+
# in this case only
|
| 68 |
+
expected = df2.drop_duplicates(["AAA", "B"])
|
| 69 |
+
tm.assert_frame_equal(result, expected)
|
| 70 |
+
|
| 71 |
+
result = df2.drop_duplicates(keep="last")
|
| 72 |
+
expected = df2.drop_duplicates(["AAA", "B"], keep="last")
|
| 73 |
+
tm.assert_frame_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
result = df2.drop_duplicates(keep=False)
|
| 76 |
+
expected = df2.drop_duplicates(["AAA", "B"], keep=False)
|
| 77 |
+
tm.assert_frame_equal(result, expected)
|
| 78 |
+
|
| 79 |
+
# integers
|
| 80 |
+
result = df.drop_duplicates("C")
|
| 81 |
+
expected = df.iloc[[0, 2]]
|
| 82 |
+
tm.assert_frame_equal(result, expected)
|
| 83 |
+
result = df.drop_duplicates("C", keep="last")
|
| 84 |
+
expected = df.iloc[[-2, -1]]
|
| 85 |
+
tm.assert_frame_equal(result, expected)
|
| 86 |
+
|
| 87 |
+
df["E"] = df["C"].astype("int8")
|
| 88 |
+
result = df.drop_duplicates("E")
|
| 89 |
+
expected = df.iloc[[0, 2]]
|
| 90 |
+
tm.assert_frame_equal(result, expected)
|
| 91 |
+
result = df.drop_duplicates("E", keep="last")
|
| 92 |
+
expected = df.iloc[[-2, -1]]
|
| 93 |
+
tm.assert_frame_equal(result, expected)
|
| 94 |
+
|
| 95 |
+
# GH 11376
|
| 96 |
+
df = DataFrame({"x": [7, 6, 3, 3, 4, 8, 0], "y": [0, 6, 5, 5, 9, 1, 2]})
|
| 97 |
+
expected = df.loc[df.index != 3]
|
| 98 |
+
tm.assert_frame_equal(df.drop_duplicates(), expected)
|
| 99 |
+
|
| 100 |
+
df = DataFrame([[1, 0], [0, 2]])
|
| 101 |
+
tm.assert_frame_equal(df.drop_duplicates(), df)
|
| 102 |
+
|
| 103 |
+
df = DataFrame([[-2, 0], [0, -4]])
|
| 104 |
+
tm.assert_frame_equal(df.drop_duplicates(), df)
|
| 105 |
+
|
| 106 |
+
x = np.iinfo(np.int64).max / 3 * 2
|
| 107 |
+
df = DataFrame([[-x, x], [0, x + 4]])
|
| 108 |
+
tm.assert_frame_equal(df.drop_duplicates(), df)
|
| 109 |
+
|
| 110 |
+
df = DataFrame([[-x, x], [x, x + 4]])
|
| 111 |
+
tm.assert_frame_equal(df.drop_duplicates(), df)
|
| 112 |
+
|
| 113 |
+
# GH 11864
|
| 114 |
+
df = DataFrame([i] * 9 for i in range(16))
|
| 115 |
+
df = concat([df, DataFrame([[1] + [0] * 8])], ignore_index=True)
|
| 116 |
+
|
| 117 |
+
for keep in ["first", "last", False]:
|
| 118 |
+
assert df.duplicated(keep=keep).sum() == 0
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def test_drop_duplicates_with_duplicate_column_names():
|
| 122 |
+
# GH17836
|
| 123 |
+
df = DataFrame([[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=["a", "a", "b"])
|
| 124 |
+
|
| 125 |
+
result0 = df.drop_duplicates()
|
| 126 |
+
tm.assert_frame_equal(result0, df)
|
| 127 |
+
|
| 128 |
+
result1 = df.drop_duplicates("a")
|
| 129 |
+
expected1 = df[:2]
|
| 130 |
+
tm.assert_frame_equal(result1, expected1)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def test_drop_duplicates_for_take_all():
|
| 134 |
+
df = DataFrame(
|
| 135 |
+
{
|
| 136 |
+
"AAA": ["foo", "bar", "baz", "bar", "foo", "bar", "qux", "foo"],
|
| 137 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 138 |
+
"C": [1, 1, 2, 2, 2, 2, 1, 2],
|
| 139 |
+
"D": range(8),
|
| 140 |
+
}
|
| 141 |
+
)
|
| 142 |
+
# single column
|
| 143 |
+
result = df.drop_duplicates("AAA")
|
| 144 |
+
expected = df.iloc[[0, 1, 2, 6]]
|
| 145 |
+
tm.assert_frame_equal(result, expected)
|
| 146 |
+
|
| 147 |
+
result = df.drop_duplicates("AAA", keep="last")
|
| 148 |
+
expected = df.iloc[[2, 5, 6, 7]]
|
| 149 |
+
tm.assert_frame_equal(result, expected)
|
| 150 |
+
|
| 151 |
+
result = df.drop_duplicates("AAA", keep=False)
|
| 152 |
+
expected = df.iloc[[2, 6]]
|
| 153 |
+
tm.assert_frame_equal(result, expected)
|
| 154 |
+
|
| 155 |
+
# multiple columns
|
| 156 |
+
result = df.drop_duplicates(["AAA", "B"])
|
| 157 |
+
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
|
| 158 |
+
tm.assert_frame_equal(result, expected)
|
| 159 |
+
|
| 160 |
+
result = df.drop_duplicates(["AAA", "B"], keep="last")
|
| 161 |
+
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
|
| 162 |
+
tm.assert_frame_equal(result, expected)
|
| 163 |
+
|
| 164 |
+
result = df.drop_duplicates(["AAA", "B"], keep=False)
|
| 165 |
+
expected = df.iloc[[0, 1, 2, 6]]
|
| 166 |
+
tm.assert_frame_equal(result, expected)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def test_drop_duplicates_tuple():
|
| 170 |
+
df = DataFrame(
|
| 171 |
+
{
|
| 172 |
+
("AA", "AB"): ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
|
| 173 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 174 |
+
"C": [1, 1, 2, 2, 2, 2, 1, 2],
|
| 175 |
+
"D": range(8),
|
| 176 |
+
}
|
| 177 |
+
)
|
| 178 |
+
# single column
|
| 179 |
+
result = df.drop_duplicates(("AA", "AB"))
|
| 180 |
+
expected = df[:2]
|
| 181 |
+
tm.assert_frame_equal(result, expected)
|
| 182 |
+
|
| 183 |
+
result = df.drop_duplicates(("AA", "AB"), keep="last")
|
| 184 |
+
expected = df.loc[[6, 7]]
|
| 185 |
+
tm.assert_frame_equal(result, expected)
|
| 186 |
+
|
| 187 |
+
result = df.drop_duplicates(("AA", "AB"), keep=False)
|
| 188 |
+
expected = df.loc[[]] # empty df
|
| 189 |
+
assert len(result) == 0
|
| 190 |
+
tm.assert_frame_equal(result, expected)
|
| 191 |
+
|
| 192 |
+
# multi column
|
| 193 |
+
expected = df.loc[[0, 1, 2, 3]]
|
| 194 |
+
result = df.drop_duplicates((("AA", "AB"), "B"))
|
| 195 |
+
tm.assert_frame_equal(result, expected)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@pytest.mark.parametrize(
|
| 199 |
+
"df",
|
| 200 |
+
[
|
| 201 |
+
DataFrame(),
|
| 202 |
+
DataFrame(columns=[]),
|
| 203 |
+
DataFrame(columns=["A", "B", "C"]),
|
| 204 |
+
DataFrame(index=[]),
|
| 205 |
+
DataFrame(index=["A", "B", "C"]),
|
| 206 |
+
],
|
| 207 |
+
)
|
| 208 |
+
def test_drop_duplicates_empty(df):
|
| 209 |
+
# GH 20516
|
| 210 |
+
result = df.drop_duplicates()
|
| 211 |
+
tm.assert_frame_equal(result, df)
|
| 212 |
+
|
| 213 |
+
result = df.copy()
|
| 214 |
+
result.drop_duplicates(inplace=True)
|
| 215 |
+
tm.assert_frame_equal(result, df)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def test_drop_duplicates_NA():
|
| 219 |
+
# none
|
| 220 |
+
df = DataFrame(
|
| 221 |
+
{
|
| 222 |
+
"A": [None, None, "foo", "bar", "foo", "bar", "bar", "foo"],
|
| 223 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 224 |
+
"C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
|
| 225 |
+
"D": range(8),
|
| 226 |
+
}
|
| 227 |
+
)
|
| 228 |
+
# single column
|
| 229 |
+
result = df.drop_duplicates("A")
|
| 230 |
+
expected = df.loc[[0, 2, 3]]
|
| 231 |
+
tm.assert_frame_equal(result, expected)
|
| 232 |
+
|
| 233 |
+
result = df.drop_duplicates("A", keep="last")
|
| 234 |
+
expected = df.loc[[1, 6, 7]]
|
| 235 |
+
tm.assert_frame_equal(result, expected)
|
| 236 |
+
|
| 237 |
+
result = df.drop_duplicates("A", keep=False)
|
| 238 |
+
expected = df.loc[[]] # empty df
|
| 239 |
+
tm.assert_frame_equal(result, expected)
|
| 240 |
+
assert len(result) == 0
|
| 241 |
+
|
| 242 |
+
# multi column
|
| 243 |
+
result = df.drop_duplicates(["A", "B"])
|
| 244 |
+
expected = df.loc[[0, 2, 3, 6]]
|
| 245 |
+
tm.assert_frame_equal(result, expected)
|
| 246 |
+
|
| 247 |
+
result = df.drop_duplicates(["A", "B"], keep="last")
|
| 248 |
+
expected = df.loc[[1, 5, 6, 7]]
|
| 249 |
+
tm.assert_frame_equal(result, expected)
|
| 250 |
+
|
| 251 |
+
result = df.drop_duplicates(["A", "B"], keep=False)
|
| 252 |
+
expected = df.loc[[6]]
|
| 253 |
+
tm.assert_frame_equal(result, expected)
|
| 254 |
+
|
| 255 |
+
# nan
|
| 256 |
+
df = DataFrame(
|
| 257 |
+
{
|
| 258 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
|
| 259 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 260 |
+
"C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
|
| 261 |
+
"D": range(8),
|
| 262 |
+
}
|
| 263 |
+
)
|
| 264 |
+
# single column
|
| 265 |
+
result = df.drop_duplicates("C")
|
| 266 |
+
expected = df[:2]
|
| 267 |
+
tm.assert_frame_equal(result, expected)
|
| 268 |
+
|
| 269 |
+
result = df.drop_duplicates("C", keep="last")
|
| 270 |
+
expected = df.loc[[3, 7]]
|
| 271 |
+
tm.assert_frame_equal(result, expected)
|
| 272 |
+
|
| 273 |
+
result = df.drop_duplicates("C", keep=False)
|
| 274 |
+
expected = df.loc[[]] # empty df
|
| 275 |
+
tm.assert_frame_equal(result, expected)
|
| 276 |
+
assert len(result) == 0
|
| 277 |
+
|
| 278 |
+
# multi column
|
| 279 |
+
result = df.drop_duplicates(["C", "B"])
|
| 280 |
+
expected = df.loc[[0, 1, 2, 4]]
|
| 281 |
+
tm.assert_frame_equal(result, expected)
|
| 282 |
+
|
| 283 |
+
result = df.drop_duplicates(["C", "B"], keep="last")
|
| 284 |
+
expected = df.loc[[1, 3, 6, 7]]
|
| 285 |
+
tm.assert_frame_equal(result, expected)
|
| 286 |
+
|
| 287 |
+
result = df.drop_duplicates(["C", "B"], keep=False)
|
| 288 |
+
expected = df.loc[[1]]
|
| 289 |
+
tm.assert_frame_equal(result, expected)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def test_drop_duplicates_NA_for_take_all():
|
| 293 |
+
# none
|
| 294 |
+
df = DataFrame(
|
| 295 |
+
{
|
| 296 |
+
"A": [None, None, "foo", "bar", "foo", "baz", "bar", "qux"],
|
| 297 |
+
"C": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0],
|
| 298 |
+
}
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
# single column
|
| 302 |
+
result = df.drop_duplicates("A")
|
| 303 |
+
expected = df.iloc[[0, 2, 3, 5, 7]]
|
| 304 |
+
tm.assert_frame_equal(result, expected)
|
| 305 |
+
|
| 306 |
+
result = df.drop_duplicates("A", keep="last")
|
| 307 |
+
expected = df.iloc[[1, 4, 5, 6, 7]]
|
| 308 |
+
tm.assert_frame_equal(result, expected)
|
| 309 |
+
|
| 310 |
+
result = df.drop_duplicates("A", keep=False)
|
| 311 |
+
expected = df.iloc[[5, 7]]
|
| 312 |
+
tm.assert_frame_equal(result, expected)
|
| 313 |
+
|
| 314 |
+
# nan
|
| 315 |
+
|
| 316 |
+
# single column
|
| 317 |
+
result = df.drop_duplicates("C")
|
| 318 |
+
expected = df.iloc[[0, 1, 5, 6]]
|
| 319 |
+
tm.assert_frame_equal(result, expected)
|
| 320 |
+
|
| 321 |
+
result = df.drop_duplicates("C", keep="last")
|
| 322 |
+
expected = df.iloc[[3, 5, 6, 7]]
|
| 323 |
+
tm.assert_frame_equal(result, expected)
|
| 324 |
+
|
| 325 |
+
result = df.drop_duplicates("C", keep=False)
|
| 326 |
+
expected = df.iloc[[5, 6]]
|
| 327 |
+
tm.assert_frame_equal(result, expected)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def test_drop_duplicates_inplace():
|
| 331 |
+
orig = DataFrame(
|
| 332 |
+
{
|
| 333 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
|
| 334 |
+
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
|
| 335 |
+
"C": [1, 1, 2, 2, 2, 2, 1, 2],
|
| 336 |
+
"D": range(8),
|
| 337 |
+
}
|
| 338 |
+
)
|
| 339 |
+
# single column
|
| 340 |
+
df = orig.copy()
|
| 341 |
+
return_value = df.drop_duplicates("A", inplace=True)
|
| 342 |
+
expected = orig[:2]
|
| 343 |
+
result = df
|
| 344 |
+
tm.assert_frame_equal(result, expected)
|
| 345 |
+
assert return_value is None
|
| 346 |
+
|
| 347 |
+
df = orig.copy()
|
| 348 |
+
return_value = df.drop_duplicates("A", keep="last", inplace=True)
|
| 349 |
+
expected = orig.loc[[6, 7]]
|
| 350 |
+
result = df
|
| 351 |
+
tm.assert_frame_equal(result, expected)
|
| 352 |
+
assert return_value is None
|
| 353 |
+
|
| 354 |
+
df = orig.copy()
|
| 355 |
+
return_value = df.drop_duplicates("A", keep=False, inplace=True)
|
| 356 |
+
expected = orig.loc[[]]
|
| 357 |
+
result = df
|
| 358 |
+
tm.assert_frame_equal(result, expected)
|
| 359 |
+
assert len(df) == 0
|
| 360 |
+
assert return_value is None
|
| 361 |
+
|
| 362 |
+
# multi column
|
| 363 |
+
df = orig.copy()
|
| 364 |
+
return_value = df.drop_duplicates(["A", "B"], inplace=True)
|
| 365 |
+
expected = orig.loc[[0, 1, 2, 3]]
|
| 366 |
+
result = df
|
| 367 |
+
tm.assert_frame_equal(result, expected)
|
| 368 |
+
assert return_value is None
|
| 369 |
+
|
| 370 |
+
df = orig.copy()
|
| 371 |
+
return_value = df.drop_duplicates(["A", "B"], keep="last", inplace=True)
|
| 372 |
+
expected = orig.loc[[0, 5, 6, 7]]
|
| 373 |
+
result = df
|
| 374 |
+
tm.assert_frame_equal(result, expected)
|
| 375 |
+
assert return_value is None
|
| 376 |
+
|
| 377 |
+
df = orig.copy()
|
| 378 |
+
return_value = df.drop_duplicates(["A", "B"], keep=False, inplace=True)
|
| 379 |
+
expected = orig.loc[[0]]
|
| 380 |
+
result = df
|
| 381 |
+
tm.assert_frame_equal(result, expected)
|
| 382 |
+
assert return_value is None
|
| 383 |
+
|
| 384 |
+
# consider everything
|
| 385 |
+
orig2 = orig.loc[:, ["A", "B", "C"]].copy()
|
| 386 |
+
|
| 387 |
+
df2 = orig2.copy()
|
| 388 |
+
return_value = df2.drop_duplicates(inplace=True)
|
| 389 |
+
# in this case only
|
| 390 |
+
expected = orig2.drop_duplicates(["A", "B"])
|
| 391 |
+
result = df2
|
| 392 |
+
tm.assert_frame_equal(result, expected)
|
| 393 |
+
assert return_value is None
|
| 394 |
+
|
| 395 |
+
df2 = orig2.copy()
|
| 396 |
+
return_value = df2.drop_duplicates(keep="last", inplace=True)
|
| 397 |
+
expected = orig2.drop_duplicates(["A", "B"], keep="last")
|
| 398 |
+
result = df2
|
| 399 |
+
tm.assert_frame_equal(result, expected)
|
| 400 |
+
assert return_value is None
|
| 401 |
+
|
| 402 |
+
df2 = orig2.copy()
|
| 403 |
+
return_value = df2.drop_duplicates(keep=False, inplace=True)
|
| 404 |
+
expected = orig2.drop_duplicates(["A", "B"], keep=False)
|
| 405 |
+
result = df2
|
| 406 |
+
tm.assert_frame_equal(result, expected)
|
| 407 |
+
assert return_value is None
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
@pytest.mark.parametrize("inplace", [True, False])
|
| 411 |
+
@pytest.mark.parametrize(
|
| 412 |
+
"origin_dict, output_dict, ignore_index, output_index",
|
| 413 |
+
[
|
| 414 |
+
({"A": [2, 2, 3]}, {"A": [2, 3]}, True, [0, 1]),
|
| 415 |
+
({"A": [2, 2, 3]}, {"A": [2, 3]}, False, [0, 2]),
|
| 416 |
+
({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, True, [0, 1]),
|
| 417 |
+
({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, False, [0, 2]),
|
| 418 |
+
],
|
| 419 |
+
)
|
| 420 |
+
def test_drop_duplicates_ignore_index(
|
| 421 |
+
inplace, origin_dict, output_dict, ignore_index, output_index
|
| 422 |
+
):
|
| 423 |
+
# GH 30114
|
| 424 |
+
df = DataFrame(origin_dict)
|
| 425 |
+
expected = DataFrame(output_dict, index=output_index)
|
| 426 |
+
|
| 427 |
+
if inplace:
|
| 428 |
+
result_df = df.copy()
|
| 429 |
+
result_df.drop_duplicates(ignore_index=ignore_index, inplace=inplace)
|
| 430 |
+
else:
|
| 431 |
+
result_df = df.drop_duplicates(ignore_index=ignore_index, inplace=inplace)
|
| 432 |
+
|
| 433 |
+
tm.assert_frame_equal(result_df, expected)
|
| 434 |
+
tm.assert_frame_equal(df, DataFrame(origin_dict))
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def test_drop_duplicates_null_in_object_column(nulls_fixture):
|
| 438 |
+
# https://github.com/pandas-dev/pandas/issues/32992
|
| 439 |
+
df = DataFrame([[1, nulls_fixture], [2, "a"]], dtype=object)
|
| 440 |
+
result = df.drop_duplicates()
|
| 441 |
+
tm.assert_frame_equal(result, df)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def test_drop_duplicates_series_vs_dataframe(keep):
|
| 445 |
+
# GH#14192
|
| 446 |
+
df = DataFrame(
|
| 447 |
+
{
|
| 448 |
+
"a": [1, 1, 1, "one", "one"],
|
| 449 |
+
"b": [2, 2, np.nan, np.nan, np.nan],
|
| 450 |
+
"c": [3, 3, np.nan, np.nan, "three"],
|
| 451 |
+
"d": [1, 2, 3, 4, 4],
|
| 452 |
+
"e": [
|
| 453 |
+
datetime(2015, 1, 1),
|
| 454 |
+
datetime(2015, 1, 1),
|
| 455 |
+
datetime(2015, 2, 1),
|
| 456 |
+
NaT,
|
| 457 |
+
NaT,
|
| 458 |
+
],
|
| 459 |
+
}
|
| 460 |
+
)
|
| 461 |
+
for column in df.columns:
|
| 462 |
+
dropped_frame = df[[column]].drop_duplicates(keep=keep)
|
| 463 |
+
dropped_series = df[column].drop_duplicates(keep=keep)
|
| 464 |
+
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
@pytest.mark.parametrize("arg", [[1], 1, "True", [], 0])
|
| 468 |
+
def test_drop_duplicates_non_boolean_ignore_index(arg):
|
| 469 |
+
# GH#38274
|
| 470 |
+
df = DataFrame({"a": [1, 2, 1, 3]})
|
| 471 |
+
msg = '^For argument "ignore_index" expected type bool, received type .*.$'
|
| 472 |
+
with pytest.raises(ValueError, match=msg):
|
| 473 |
+
df.drop_duplicates(ignore_index=arg)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_droplevel.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
DataFrame,
|
| 5 |
+
Index,
|
| 6 |
+
MultiIndex,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestDropLevel:
|
| 12 |
+
def test_droplevel(self, frame_or_series):
|
| 13 |
+
# GH#20342
|
| 14 |
+
cols = MultiIndex.from_tuples(
|
| 15 |
+
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
|
| 16 |
+
)
|
| 17 |
+
mi = MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"])
|
| 18 |
+
df = DataFrame([[3, 4], [7, 8], [11, 12]], index=mi, columns=cols)
|
| 19 |
+
if frame_or_series is not DataFrame:
|
| 20 |
+
df = df.iloc[:, 0]
|
| 21 |
+
|
| 22 |
+
# test that dropping of a level in index works
|
| 23 |
+
expected = df.reset_index("a", drop=True)
|
| 24 |
+
result = df.droplevel("a", axis="index")
|
| 25 |
+
tm.assert_equal(result, expected)
|
| 26 |
+
|
| 27 |
+
if frame_or_series is DataFrame:
|
| 28 |
+
# test that dropping of a level in columns works
|
| 29 |
+
expected = df.copy()
|
| 30 |
+
expected.columns = Index(["c", "d"], name="level_1")
|
| 31 |
+
result = df.droplevel("level_2", axis="columns")
|
| 32 |
+
tm.assert_equal(result, expected)
|
| 33 |
+
else:
|
| 34 |
+
# test that droplevel raises ValueError on axis != 0
|
| 35 |
+
with pytest.raises(ValueError, match="No axis named columns"):
|
| 36 |
+
df.droplevel(1, axis="columns")
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_dropna.py
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime
|
| 2 |
+
|
| 3 |
+
import dateutil
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from pandas import (
|
| 9 |
+
DataFrame,
|
| 10 |
+
Series,
|
| 11 |
+
)
|
| 12 |
+
import pandas._testing as tm
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestDataFrameMissingData:
|
| 16 |
+
def test_dropEmptyRows(self, float_frame):
|
| 17 |
+
N = len(float_frame.index)
|
| 18 |
+
mat = np.random.randn(N)
|
| 19 |
+
mat[:5] = np.nan
|
| 20 |
+
|
| 21 |
+
frame = DataFrame({"foo": mat}, index=float_frame.index)
|
| 22 |
+
original = Series(mat, index=float_frame.index, name="foo")
|
| 23 |
+
expected = original.dropna()
|
| 24 |
+
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
|
| 25 |
+
|
| 26 |
+
smaller_frame = frame.dropna(how="all")
|
| 27 |
+
# check that original was preserved
|
| 28 |
+
tm.assert_series_equal(frame["foo"], original)
|
| 29 |
+
return_value = inplace_frame1.dropna(how="all", inplace=True)
|
| 30 |
+
tm.assert_series_equal(smaller_frame["foo"], expected)
|
| 31 |
+
tm.assert_series_equal(inplace_frame1["foo"], expected)
|
| 32 |
+
assert return_value is None
|
| 33 |
+
|
| 34 |
+
smaller_frame = frame.dropna(how="all", subset=["foo"])
|
| 35 |
+
return_value = inplace_frame2.dropna(how="all", subset=["foo"], inplace=True)
|
| 36 |
+
tm.assert_series_equal(smaller_frame["foo"], expected)
|
| 37 |
+
tm.assert_series_equal(inplace_frame2["foo"], expected)
|
| 38 |
+
assert return_value is None
|
| 39 |
+
|
| 40 |
+
def test_dropIncompleteRows(self, float_frame):
|
| 41 |
+
N = len(float_frame.index)
|
| 42 |
+
mat = np.random.randn(N)
|
| 43 |
+
mat[:5] = np.nan
|
| 44 |
+
|
| 45 |
+
frame = DataFrame({"foo": mat}, index=float_frame.index)
|
| 46 |
+
frame["bar"] = 5
|
| 47 |
+
original = Series(mat, index=float_frame.index, name="foo")
|
| 48 |
+
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
|
| 49 |
+
|
| 50 |
+
smaller_frame = frame.dropna()
|
| 51 |
+
tm.assert_series_equal(frame["foo"], original)
|
| 52 |
+
return_value = inp_frame1.dropna(inplace=True)
|
| 53 |
+
|
| 54 |
+
exp = Series(mat[5:], index=float_frame.index[5:], name="foo")
|
| 55 |
+
tm.assert_series_equal(smaller_frame["foo"], exp)
|
| 56 |
+
tm.assert_series_equal(inp_frame1["foo"], exp)
|
| 57 |
+
assert return_value is None
|
| 58 |
+
|
| 59 |
+
samesize_frame = frame.dropna(subset=["bar"])
|
| 60 |
+
tm.assert_series_equal(frame["foo"], original)
|
| 61 |
+
assert (frame["bar"] == 5).all()
|
| 62 |
+
return_value = inp_frame2.dropna(subset=["bar"], inplace=True)
|
| 63 |
+
tm.assert_index_equal(samesize_frame.index, float_frame.index)
|
| 64 |
+
tm.assert_index_equal(inp_frame2.index, float_frame.index)
|
| 65 |
+
assert return_value is None
|
| 66 |
+
|
| 67 |
+
def test_dropna(self):
|
| 68 |
+
df = DataFrame(np.random.randn(6, 4))
|
| 69 |
+
df.iloc[:2, 2] = np.nan
|
| 70 |
+
|
| 71 |
+
dropped = df.dropna(axis=1)
|
| 72 |
+
expected = df.loc[:, [0, 1, 3]]
|
| 73 |
+
inp = df.copy()
|
| 74 |
+
return_value = inp.dropna(axis=1, inplace=True)
|
| 75 |
+
tm.assert_frame_equal(dropped, expected)
|
| 76 |
+
tm.assert_frame_equal(inp, expected)
|
| 77 |
+
assert return_value is None
|
| 78 |
+
|
| 79 |
+
dropped = df.dropna(axis=0)
|
| 80 |
+
expected = df.loc[list(range(2, 6))]
|
| 81 |
+
inp = df.copy()
|
| 82 |
+
return_value = inp.dropna(axis=0, inplace=True)
|
| 83 |
+
tm.assert_frame_equal(dropped, expected)
|
| 84 |
+
tm.assert_frame_equal(inp, expected)
|
| 85 |
+
assert return_value is None
|
| 86 |
+
|
| 87 |
+
# threshold
|
| 88 |
+
dropped = df.dropna(axis=1, thresh=5)
|
| 89 |
+
expected = df.loc[:, [0, 1, 3]]
|
| 90 |
+
inp = df.copy()
|
| 91 |
+
return_value = inp.dropna(axis=1, thresh=5, inplace=True)
|
| 92 |
+
tm.assert_frame_equal(dropped, expected)
|
| 93 |
+
tm.assert_frame_equal(inp, expected)
|
| 94 |
+
assert return_value is None
|
| 95 |
+
|
| 96 |
+
dropped = df.dropna(axis=0, thresh=4)
|
| 97 |
+
expected = df.loc[range(2, 6)]
|
| 98 |
+
inp = df.copy()
|
| 99 |
+
return_value = inp.dropna(axis=0, thresh=4, inplace=True)
|
| 100 |
+
tm.assert_frame_equal(dropped, expected)
|
| 101 |
+
tm.assert_frame_equal(inp, expected)
|
| 102 |
+
assert return_value is None
|
| 103 |
+
|
| 104 |
+
dropped = df.dropna(axis=1, thresh=4)
|
| 105 |
+
tm.assert_frame_equal(dropped, df)
|
| 106 |
+
|
| 107 |
+
dropped = df.dropna(axis=1, thresh=3)
|
| 108 |
+
tm.assert_frame_equal(dropped, df)
|
| 109 |
+
|
| 110 |
+
# subset
|
| 111 |
+
dropped = df.dropna(axis=0, subset=[0, 1, 3])
|
| 112 |
+
inp = df.copy()
|
| 113 |
+
return_value = inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
|
| 114 |
+
tm.assert_frame_equal(dropped, df)
|
| 115 |
+
tm.assert_frame_equal(inp, df)
|
| 116 |
+
assert return_value is None
|
| 117 |
+
|
| 118 |
+
# all
|
| 119 |
+
dropped = df.dropna(axis=1, how="all")
|
| 120 |
+
tm.assert_frame_equal(dropped, df)
|
| 121 |
+
|
| 122 |
+
df[2] = np.nan
|
| 123 |
+
dropped = df.dropna(axis=1, how="all")
|
| 124 |
+
expected = df.loc[:, [0, 1, 3]]
|
| 125 |
+
tm.assert_frame_equal(dropped, expected)
|
| 126 |
+
|
| 127 |
+
# bad input
|
| 128 |
+
msg = "No axis named 3 for object type DataFrame"
|
| 129 |
+
with pytest.raises(ValueError, match=msg):
|
| 130 |
+
df.dropna(axis=3)
|
| 131 |
+
|
| 132 |
+
def test_drop_and_dropna_caching(self):
|
| 133 |
+
# tst that cacher updates
|
| 134 |
+
original = Series([1, 2, np.nan], name="A")
|
| 135 |
+
expected = Series([1, 2], dtype=original.dtype, name="A")
|
| 136 |
+
df = DataFrame({"A": original.values.copy()})
|
| 137 |
+
df2 = df.copy()
|
| 138 |
+
df["A"].dropna()
|
| 139 |
+
tm.assert_series_equal(df["A"], original)
|
| 140 |
+
|
| 141 |
+
ser = df["A"]
|
| 142 |
+
return_value = ser.dropna(inplace=True)
|
| 143 |
+
tm.assert_series_equal(ser, expected)
|
| 144 |
+
tm.assert_series_equal(df["A"], original)
|
| 145 |
+
assert return_value is None
|
| 146 |
+
|
| 147 |
+
df2["A"].drop([1])
|
| 148 |
+
tm.assert_series_equal(df2["A"], original)
|
| 149 |
+
|
| 150 |
+
ser = df2["A"]
|
| 151 |
+
return_value = ser.drop([1], inplace=True)
|
| 152 |
+
tm.assert_series_equal(ser, original.drop([1]))
|
| 153 |
+
tm.assert_series_equal(df2["A"], original)
|
| 154 |
+
assert return_value is None
|
| 155 |
+
|
| 156 |
+
def test_dropna_corner(self, float_frame):
|
| 157 |
+
# bad input
|
| 158 |
+
msg = "invalid how option: foo"
|
| 159 |
+
with pytest.raises(ValueError, match=msg):
|
| 160 |
+
float_frame.dropna(how="foo")
|
| 161 |
+
# non-existent column - 8303
|
| 162 |
+
with pytest.raises(KeyError, match=r"^\['X'\]$"):
|
| 163 |
+
float_frame.dropna(subset=["A", "X"])
|
| 164 |
+
|
| 165 |
+
def test_dropna_multiple_axes(self):
|
| 166 |
+
df = DataFrame(
|
| 167 |
+
[
|
| 168 |
+
[1, np.nan, 2, 3],
|
| 169 |
+
[4, np.nan, 5, 6],
|
| 170 |
+
[np.nan, np.nan, np.nan, np.nan],
|
| 171 |
+
[7, np.nan, 8, 9],
|
| 172 |
+
]
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
# GH20987
|
| 176 |
+
with pytest.raises(TypeError, match="supplying multiple axes"):
|
| 177 |
+
df.dropna(how="all", axis=[0, 1])
|
| 178 |
+
with pytest.raises(TypeError, match="supplying multiple axes"):
|
| 179 |
+
df.dropna(how="all", axis=(0, 1))
|
| 180 |
+
|
| 181 |
+
inp = df.copy()
|
| 182 |
+
with pytest.raises(TypeError, match="supplying multiple axes"):
|
| 183 |
+
inp.dropna(how="all", axis=(0, 1), inplace=True)
|
| 184 |
+
|
| 185 |
+
def test_dropna_tz_aware_datetime(self):
|
| 186 |
+
# GH13407
|
| 187 |
+
df = DataFrame()
|
| 188 |
+
dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
|
| 189 |
+
dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
|
| 190 |
+
df["Time"] = [dt1]
|
| 191 |
+
result = df.dropna(axis=0)
|
| 192 |
+
expected = DataFrame({"Time": [dt1]})
|
| 193 |
+
tm.assert_frame_equal(result, expected)
|
| 194 |
+
|
| 195 |
+
# Ex2
|
| 196 |
+
df = DataFrame({"Time": [dt1, None, np.nan, dt2]})
|
| 197 |
+
result = df.dropna(axis=0)
|
| 198 |
+
expected = DataFrame([dt1, dt2], columns=["Time"], index=[0, 3])
|
| 199 |
+
tm.assert_frame_equal(result, expected)
|
| 200 |
+
|
| 201 |
+
def test_dropna_categorical_interval_index(self):
|
| 202 |
+
# GH 25087
|
| 203 |
+
ii = pd.IntervalIndex.from_breaks([0, 2.78, 3.14, 6.28])
|
| 204 |
+
ci = pd.CategoricalIndex(ii)
|
| 205 |
+
df = DataFrame({"A": list("abc")}, index=ci)
|
| 206 |
+
|
| 207 |
+
expected = df
|
| 208 |
+
result = df.dropna()
|
| 209 |
+
tm.assert_frame_equal(result, expected)
|
| 210 |
+
|
| 211 |
+
def test_dropna_with_duplicate_columns(self):
|
| 212 |
+
df = DataFrame(
|
| 213 |
+
{
|
| 214 |
+
"A": np.random.randn(5),
|
| 215 |
+
"B": np.random.randn(5),
|
| 216 |
+
"C": np.random.randn(5),
|
| 217 |
+
"D": ["a", "b", "c", "d", "e"],
|
| 218 |
+
}
|
| 219 |
+
)
|
| 220 |
+
df.iloc[2, [0, 1, 2]] = np.nan
|
| 221 |
+
df.iloc[0, 0] = np.nan
|
| 222 |
+
df.iloc[1, 1] = np.nan
|
| 223 |
+
df.iloc[:, 3] = np.nan
|
| 224 |
+
expected = df.dropna(subset=["A", "B", "C"], how="all")
|
| 225 |
+
expected.columns = ["A", "A", "B", "C"]
|
| 226 |
+
|
| 227 |
+
df.columns = ["A", "A", "B", "C"]
|
| 228 |
+
|
| 229 |
+
result = df.dropna(subset=["A", "C"], how="all")
|
| 230 |
+
tm.assert_frame_equal(result, expected)
|
| 231 |
+
|
| 232 |
+
def test_set_single_column_subset(self):
|
| 233 |
+
# GH 41021
|
| 234 |
+
df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4, np.NaN, 5]})
|
| 235 |
+
expected = DataFrame(
|
| 236 |
+
{"A": [1, 3], "B": list("ac"), "C": [4.0, 5.0]}, index=[0, 2]
|
| 237 |
+
)
|
| 238 |
+
result = df.dropna(subset="C")
|
| 239 |
+
tm.assert_frame_equal(result, expected)
|
| 240 |
+
|
| 241 |
+
def test_single_column_not_present_in_axis(self):
|
| 242 |
+
# GH 41021
|
| 243 |
+
df = DataFrame({"A": [1, 2, 3]})
|
| 244 |
+
|
| 245 |
+
# Column not present
|
| 246 |
+
with pytest.raises(KeyError, match="['D']"):
|
| 247 |
+
df.dropna(subset="D", axis=0)
|
| 248 |
+
|
| 249 |
+
def test_subset_is_nparray(self):
|
| 250 |
+
# GH 41021
|
| 251 |
+
df = DataFrame({"A": [1, 2, np.NaN], "B": list("abc"), "C": [4, np.NaN, 5]})
|
| 252 |
+
expected = DataFrame({"A": [1.0], "B": ["a"], "C": [4.0]})
|
| 253 |
+
result = df.dropna(subset=np.array(["A", "C"]))
|
| 254 |
+
tm.assert_frame_equal(result, expected)
|
| 255 |
+
|
| 256 |
+
def test_no_nans_in_frame(self, axis):
|
| 257 |
+
# GH#41965
|
| 258 |
+
df = DataFrame([[1, 2], [3, 4]], columns=pd.RangeIndex(0, 2))
|
| 259 |
+
expected = df.copy()
|
| 260 |
+
result = df.dropna(axis=axis)
|
| 261 |
+
tm.assert_frame_equal(result, expected, check_index_type=True)
|
| 262 |
+
|
| 263 |
+
def test_how_thresh_param_incompatible(self):
|
| 264 |
+
# GH46575
|
| 265 |
+
df = DataFrame([1, 2, pd.NA])
|
| 266 |
+
msg = "You cannot set both the how and thresh arguments at the same time"
|
| 267 |
+
with pytest.raises(TypeError, match=msg):
|
| 268 |
+
df.dropna(how="all", thresh=2)
|
| 269 |
+
|
| 270 |
+
with pytest.raises(TypeError, match=msg):
|
| 271 |
+
df.dropna(how="any", thresh=2)
|
| 272 |
+
|
| 273 |
+
with pytest.raises(TypeError, match=msg):
|
| 274 |
+
df.dropna(how=None, thresh=None)
|
| 275 |
+
|
| 276 |
+
@pytest.mark.parametrize("val", [1, 1.5])
|
| 277 |
+
def test_dropna_ignore_index(self, val):
|
| 278 |
+
# GH#31725
|
| 279 |
+
df = DataFrame({"a": [1, 2, val]}, index=[3, 2, 1])
|
| 280 |
+
result = df.dropna(ignore_index=True)
|
| 281 |
+
expected = DataFrame({"a": [1, 2, val]})
|
| 282 |
+
tm.assert_frame_equal(result, expected)
|
| 283 |
+
|
| 284 |
+
df.dropna(ignore_index=True, inplace=True)
|
| 285 |
+
tm.assert_frame_equal(df, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_dtypes.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import timedelta
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
Series,
|
| 12 |
+
date_range,
|
| 13 |
+
option_context,
|
| 14 |
+
)
|
| 15 |
+
import pandas._testing as tm
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TestDataFrameDataTypes:
|
| 19 |
+
def test_empty_frame_dtypes(self):
|
| 20 |
+
empty_df = DataFrame()
|
| 21 |
+
tm.assert_series_equal(empty_df.dtypes, Series(dtype=object))
|
| 22 |
+
|
| 23 |
+
nocols_df = DataFrame(index=[1, 2, 3])
|
| 24 |
+
tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object))
|
| 25 |
+
|
| 26 |
+
norows_df = DataFrame(columns=list("abc"))
|
| 27 |
+
tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc")))
|
| 28 |
+
|
| 29 |
+
norows_int_df = DataFrame(columns=list("abc")).astype(np.int32)
|
| 30 |
+
tm.assert_series_equal(
|
| 31 |
+
norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc"))
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
df = DataFrame({"a": 1, "b": True, "c": 1.0}, index=[1, 2, 3])
|
| 35 |
+
ex_dtypes = Series({"a": np.int64, "b": np.bool_, "c": np.float64})
|
| 36 |
+
tm.assert_series_equal(df.dtypes, ex_dtypes)
|
| 37 |
+
|
| 38 |
+
# same but for empty slice of df
|
| 39 |
+
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
|
| 40 |
+
|
| 41 |
+
def test_datetime_with_tz_dtypes(self):
|
| 42 |
+
tzframe = DataFrame(
|
| 43 |
+
{
|
| 44 |
+
"A": date_range("20130101", periods=3),
|
| 45 |
+
"B": date_range("20130101", periods=3, tz="US/Eastern"),
|
| 46 |
+
"C": date_range("20130101", periods=3, tz="CET"),
|
| 47 |
+
}
|
| 48 |
+
)
|
| 49 |
+
tzframe.iloc[1, 1] = pd.NaT
|
| 50 |
+
tzframe.iloc[1, 2] = pd.NaT
|
| 51 |
+
result = tzframe.dtypes.sort_index()
|
| 52 |
+
expected = Series(
|
| 53 |
+
[
|
| 54 |
+
np.dtype("datetime64[ns]"),
|
| 55 |
+
DatetimeTZDtype("ns", "US/Eastern"),
|
| 56 |
+
DatetimeTZDtype("ns", "CET"),
|
| 57 |
+
],
|
| 58 |
+
["A", "B", "C"],
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
tm.assert_series_equal(result, expected)
|
| 62 |
+
|
| 63 |
+
def test_dtypes_are_correct_after_column_slice(self):
|
| 64 |
+
# GH6525
|
| 65 |
+
df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
|
| 66 |
+
tm.assert_series_equal(
|
| 67 |
+
df.dtypes,
|
| 68 |
+
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
|
| 69 |
+
)
|
| 70 |
+
tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series({"c": np.float_}))
|
| 71 |
+
tm.assert_series_equal(
|
| 72 |
+
df.dtypes,
|
| 73 |
+
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
@pytest.mark.parametrize(
|
| 77 |
+
"data",
|
| 78 |
+
[pd.NA, True],
|
| 79 |
+
)
|
| 80 |
+
def test_dtypes_are_correct_after_groupby_last(self, data):
|
| 81 |
+
# GH46409
|
| 82 |
+
df = DataFrame(
|
| 83 |
+
{"id": [1, 2, 3, 4], "test": [True, pd.NA, data, False]}
|
| 84 |
+
).convert_dtypes()
|
| 85 |
+
result = df.groupby("id").last().test
|
| 86 |
+
expected = df.set_index("id").test
|
| 87 |
+
assert result.dtype == pd.BooleanDtype()
|
| 88 |
+
tm.assert_series_equal(expected, result)
|
| 89 |
+
|
| 90 |
+
def test_dtypes_gh8722(self, float_string_frame):
|
| 91 |
+
float_string_frame["bool"] = float_string_frame["A"] > 0
|
| 92 |
+
result = float_string_frame.dtypes
|
| 93 |
+
expected = Series(
|
| 94 |
+
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
|
| 95 |
+
)
|
| 96 |
+
tm.assert_series_equal(result, expected)
|
| 97 |
+
|
| 98 |
+
# compat, GH 8722
|
| 99 |
+
with option_context("use_inf_as_na", True):
|
| 100 |
+
df = DataFrame([[1]])
|
| 101 |
+
result = df.dtypes
|
| 102 |
+
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
|
| 103 |
+
|
| 104 |
+
def test_dtypes_timedeltas(self):
|
| 105 |
+
df = DataFrame(
|
| 106 |
+
{
|
| 107 |
+
"A": Series(date_range("2012-1-1", periods=3, freq="D")),
|
| 108 |
+
"B": Series([timedelta(days=i) for i in range(3)]),
|
| 109 |
+
}
|
| 110 |
+
)
|
| 111 |
+
result = df.dtypes
|
| 112 |
+
expected = Series(
|
| 113 |
+
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
|
| 114 |
+
)
|
| 115 |
+
tm.assert_series_equal(result, expected)
|
| 116 |
+
|
| 117 |
+
df["C"] = df["A"] + df["B"]
|
| 118 |
+
result = df.dtypes
|
| 119 |
+
expected = Series(
|
| 120 |
+
[
|
| 121 |
+
np.dtype("datetime64[ns]"),
|
| 122 |
+
np.dtype("timedelta64[ns]"),
|
| 123 |
+
np.dtype("datetime64[ns]"),
|
| 124 |
+
],
|
| 125 |
+
index=list("ABC"),
|
| 126 |
+
)
|
| 127 |
+
tm.assert_series_equal(result, expected)
|
| 128 |
+
|
| 129 |
+
# mixed int types
|
| 130 |
+
df["D"] = 1
|
| 131 |
+
result = df.dtypes
|
| 132 |
+
expected = Series(
|
| 133 |
+
[
|
| 134 |
+
np.dtype("datetime64[ns]"),
|
| 135 |
+
np.dtype("timedelta64[ns]"),
|
| 136 |
+
np.dtype("datetime64[ns]"),
|
| 137 |
+
np.dtype("int64"),
|
| 138 |
+
],
|
| 139 |
+
index=list("ABCD"),
|
| 140 |
+
)
|
| 141 |
+
tm.assert_series_equal(result, expected)
|
| 142 |
+
|
| 143 |
+
def test_frame_apply_np_array_return_type(self):
|
| 144 |
+
# GH 35517
|
| 145 |
+
df = DataFrame([["foo"]])
|
| 146 |
+
result = df.apply(lambda col: np.array("bar"))
|
| 147 |
+
expected = Series(["bar"])
|
| 148 |
+
tm.assert_series_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_duplicated.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
Series,
|
| 9 |
+
date_range,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
|
| 15 |
+
def test_duplicated_with_misspelled_column_name(subset):
|
| 16 |
+
# GH 19730
|
| 17 |
+
df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
|
| 18 |
+
msg = re.escape("Index(['a'], dtype='object')")
|
| 19 |
+
|
| 20 |
+
with pytest.raises(KeyError, match=msg):
|
| 21 |
+
df.duplicated(subset)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@pytest.mark.slow
|
| 25 |
+
def test_duplicated_do_not_fail_on_wide_dataframes():
|
| 26 |
+
# gh-21524
|
| 27 |
+
# Given the wide dataframe with a lot of columns
|
| 28 |
+
# with different (important!) values
|
| 29 |
+
data = {f"col_{i:02d}": np.random.randint(0, 1000, 30000) for i in range(100)}
|
| 30 |
+
df = DataFrame(data).T
|
| 31 |
+
result = df.duplicated()
|
| 32 |
+
|
| 33 |
+
# Then duplicates produce the bool Series as a result and don't fail during
|
| 34 |
+
# calculation. Actual values doesn't matter here, though usually it's all
|
| 35 |
+
# False in this case
|
| 36 |
+
assert isinstance(result, Series)
|
| 37 |
+
assert result.dtype == np.bool_
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@pytest.mark.parametrize(
|
| 41 |
+
"keep, expected",
|
| 42 |
+
[
|
| 43 |
+
("first", Series([False, False, True, False, True])),
|
| 44 |
+
("last", Series([True, True, False, False, False])),
|
| 45 |
+
(False, Series([True, True, True, False, True])),
|
| 46 |
+
],
|
| 47 |
+
)
|
| 48 |
+
def test_duplicated_keep(keep, expected):
|
| 49 |
+
df = DataFrame({"A": [0, 1, 1, 2, 0], "B": ["a", "b", "b", "c", "a"]})
|
| 50 |
+
|
| 51 |
+
result = df.duplicated(keep=keep)
|
| 52 |
+
tm.assert_series_equal(result, expected)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
|
| 56 |
+
@pytest.mark.parametrize(
|
| 57 |
+
"keep, expected",
|
| 58 |
+
[
|
| 59 |
+
("first", Series([False, False, True, False, True])),
|
| 60 |
+
("last", Series([True, True, False, False, False])),
|
| 61 |
+
(False, Series([True, True, True, False, True])),
|
| 62 |
+
],
|
| 63 |
+
)
|
| 64 |
+
def test_duplicated_nan_none(keep, expected):
|
| 65 |
+
df = DataFrame({"C": [np.nan, 3, 3, None, np.nan], "x": 1}, dtype=object)
|
| 66 |
+
|
| 67 |
+
result = df.duplicated(keep=keep)
|
| 68 |
+
tm.assert_series_equal(result, expected)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@pytest.mark.parametrize("subset", [None, ["A", "B"], "A"])
|
| 72 |
+
def test_duplicated_subset(subset, keep):
|
| 73 |
+
df = DataFrame(
|
| 74 |
+
{
|
| 75 |
+
"A": [0, 1, 1, 2, 0],
|
| 76 |
+
"B": ["a", "b", "b", "c", "a"],
|
| 77 |
+
"C": [np.nan, 3, 3, None, np.nan],
|
| 78 |
+
}
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
if subset is None:
|
| 82 |
+
subset = list(df.columns)
|
| 83 |
+
elif isinstance(subset, str):
|
| 84 |
+
# need to have a DataFrame, not a Series
|
| 85 |
+
# -> select columns with singleton list, not string
|
| 86 |
+
subset = [subset]
|
| 87 |
+
|
| 88 |
+
expected = df[subset].duplicated(keep=keep)
|
| 89 |
+
result = df.duplicated(keep=keep, subset=subset)
|
| 90 |
+
tm.assert_series_equal(result, expected)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def test_duplicated_on_empty_frame():
|
| 94 |
+
# GH 25184
|
| 95 |
+
|
| 96 |
+
df = DataFrame(columns=["a", "b"])
|
| 97 |
+
dupes = df.duplicated("a")
|
| 98 |
+
|
| 99 |
+
result = df[dupes]
|
| 100 |
+
expected = df.copy()
|
| 101 |
+
tm.assert_frame_equal(result, expected)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def test_frame_datetime64_duplicated():
|
| 105 |
+
dates = date_range("2010-07-01", end="2010-08-05")
|
| 106 |
+
|
| 107 |
+
tst = DataFrame({"symbol": "AAA", "date": dates})
|
| 108 |
+
result = tst.duplicated(["date", "symbol"])
|
| 109 |
+
assert (-result).all()
|
| 110 |
+
|
| 111 |
+
tst = DataFrame({"date": dates})
|
| 112 |
+
result = tst.date.duplicated()
|
| 113 |
+
assert (-result).all()
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_equals.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
DataFrame,
|
| 5 |
+
date_range,
|
| 6 |
+
)
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestEquals:
|
| 11 |
+
def test_dataframe_not_equal(self):
|
| 12 |
+
# see GH#28839
|
| 13 |
+
df1 = DataFrame({"a": [1, 2], "b": ["s", "d"]})
|
| 14 |
+
df2 = DataFrame({"a": ["s", "d"], "b": [1, 2]})
|
| 15 |
+
assert df1.equals(df2) is False
|
| 16 |
+
|
| 17 |
+
def test_equals_different_blocks(self, using_array_manager):
|
| 18 |
+
# GH#9330
|
| 19 |
+
df0 = DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]})
|
| 20 |
+
df1 = df0.reset_index()[["A", "B", "C"]]
|
| 21 |
+
if not using_array_manager:
|
| 22 |
+
# this assert verifies that the above operations have
|
| 23 |
+
# induced a block rearrangement
|
| 24 |
+
assert df0._mgr.blocks[0].dtype != df1._mgr.blocks[0].dtype
|
| 25 |
+
|
| 26 |
+
# do the real tests
|
| 27 |
+
tm.assert_frame_equal(df0, df1)
|
| 28 |
+
assert df0.equals(df1)
|
| 29 |
+
assert df1.equals(df0)
|
| 30 |
+
|
| 31 |
+
def test_equals(self):
|
| 32 |
+
# Add object dtype column with nans
|
| 33 |
+
index = np.random.random(10)
|
| 34 |
+
df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
|
| 35 |
+
df1["text"] = "the sky is so blue. we could use more chocolate.".split()
|
| 36 |
+
df1["start"] = date_range("2000-1-1", periods=10, freq="T")
|
| 37 |
+
df1["end"] = date_range("2000-1-1", periods=10, freq="D")
|
| 38 |
+
df1["diff"] = df1["end"] - df1["start"]
|
| 39 |
+
# Explicitly cast to object, to avoid implicit cast when setting np.nan
|
| 40 |
+
df1["bool"] = (np.arange(10) % 3 == 0).astype(object)
|
| 41 |
+
df1.loc[::2] = np.nan
|
| 42 |
+
df2 = df1.copy()
|
| 43 |
+
assert df1["text"].equals(df2["text"])
|
| 44 |
+
assert df1["start"].equals(df2["start"])
|
| 45 |
+
assert df1["end"].equals(df2["end"])
|
| 46 |
+
assert df1["diff"].equals(df2["diff"])
|
| 47 |
+
assert df1["bool"].equals(df2["bool"])
|
| 48 |
+
assert df1.equals(df2)
|
| 49 |
+
assert not df1.equals(object)
|
| 50 |
+
|
| 51 |
+
# different dtype
|
| 52 |
+
different = df1.copy()
|
| 53 |
+
different["floats"] = different["floats"].astype("float32")
|
| 54 |
+
assert not df1.equals(different)
|
| 55 |
+
|
| 56 |
+
# different index
|
| 57 |
+
different_index = -index
|
| 58 |
+
different = df2.set_index(different_index)
|
| 59 |
+
assert not df1.equals(different)
|
| 60 |
+
|
| 61 |
+
# different columns
|
| 62 |
+
different = df2.copy()
|
| 63 |
+
different.columns = df2.columns[::-1]
|
| 64 |
+
assert not df1.equals(different)
|
| 65 |
+
|
| 66 |
+
# DatetimeIndex
|
| 67 |
+
index = date_range("2000-1-1", periods=10, freq="T")
|
| 68 |
+
df1 = df1.set_index(index)
|
| 69 |
+
df2 = df1.copy()
|
| 70 |
+
assert df1.equals(df2)
|
| 71 |
+
|
| 72 |
+
# MultiIndex
|
| 73 |
+
df3 = df1.set_index(["text"], append=True)
|
| 74 |
+
df2 = df1.set_index(["text"], append=True)
|
| 75 |
+
assert df3.equals(df2)
|
| 76 |
+
|
| 77 |
+
df2 = df1.set_index(["floats"], append=True)
|
| 78 |
+
assert not df3.equals(df2)
|
| 79 |
+
|
| 80 |
+
# NaN in index
|
| 81 |
+
df3 = df1.set_index(["floats"], append=True)
|
| 82 |
+
df2 = df1.set_index(["floats"], append=True)
|
| 83 |
+
assert df3.equals(df2)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_explode.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def test_error():
|
| 11 |
+
df = pd.DataFrame(
|
| 12 |
+
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
|
| 13 |
+
)
|
| 14 |
+
with pytest.raises(
|
| 15 |
+
ValueError, match="column must be a scalar, tuple, or list thereof"
|
| 16 |
+
):
|
| 17 |
+
df.explode([list("AA")])
|
| 18 |
+
|
| 19 |
+
with pytest.raises(ValueError, match="column must be unique"):
|
| 20 |
+
df.explode(list("AA"))
|
| 21 |
+
|
| 22 |
+
df.columns = list("AA")
|
| 23 |
+
with pytest.raises(
|
| 24 |
+
ValueError,
|
| 25 |
+
match=re.escape("DataFrame columns must be unique. Duplicate columns: ['A']"),
|
| 26 |
+
):
|
| 27 |
+
df.explode("A")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@pytest.mark.parametrize(
|
| 31 |
+
"input_subset, error_message",
|
| 32 |
+
[
|
| 33 |
+
(
|
| 34 |
+
list("AC"),
|
| 35 |
+
"columns must have matching element counts",
|
| 36 |
+
),
|
| 37 |
+
(
|
| 38 |
+
[],
|
| 39 |
+
"column must be nonempty",
|
| 40 |
+
),
|
| 41 |
+
(
|
| 42 |
+
list("AC"),
|
| 43 |
+
"columns must have matching element counts",
|
| 44 |
+
),
|
| 45 |
+
],
|
| 46 |
+
)
|
| 47 |
+
def test_error_multi_columns(input_subset, error_message):
|
| 48 |
+
# GH 39240
|
| 49 |
+
df = pd.DataFrame(
|
| 50 |
+
{
|
| 51 |
+
"A": [[0, 1, 2], np.nan, [], (3, 4)],
|
| 52 |
+
"B": 1,
|
| 53 |
+
"C": [["a", "b", "c"], "foo", [], ["d", "e", "f"]],
|
| 54 |
+
},
|
| 55 |
+
index=list("abcd"),
|
| 56 |
+
)
|
| 57 |
+
with pytest.raises(ValueError, match=error_message):
|
| 58 |
+
df.explode(input_subset)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@pytest.mark.parametrize(
|
| 62 |
+
"scalar",
|
| 63 |
+
["a", 0, 1.5, pd.Timedelta("1 days"), pd.Timestamp("2019-12-31")],
|
| 64 |
+
)
|
| 65 |
+
def test_basic(scalar):
|
| 66 |
+
df = pd.DataFrame(
|
| 67 |
+
{scalar: pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
|
| 68 |
+
)
|
| 69 |
+
result = df.explode(scalar)
|
| 70 |
+
expected = pd.DataFrame(
|
| 71 |
+
{
|
| 72 |
+
scalar: pd.Series(
|
| 73 |
+
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object
|
| 74 |
+
),
|
| 75 |
+
"B": 1,
|
| 76 |
+
}
|
| 77 |
+
)
|
| 78 |
+
tm.assert_frame_equal(result, expected)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def test_multi_index_rows():
|
| 82 |
+
df = pd.DataFrame(
|
| 83 |
+
{"A": np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), "B": 1},
|
| 84 |
+
index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]),
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
result = df.explode("A")
|
| 88 |
+
expected = pd.DataFrame(
|
| 89 |
+
{
|
| 90 |
+
"A": pd.Series(
|
| 91 |
+
[0, 1, 2, np.nan, np.nan, 3, 4],
|
| 92 |
+
index=pd.MultiIndex.from_tuples(
|
| 93 |
+
[
|
| 94 |
+
("a", 1),
|
| 95 |
+
("a", 1),
|
| 96 |
+
("a", 1),
|
| 97 |
+
("a", 2),
|
| 98 |
+
("b", 1),
|
| 99 |
+
("b", 2),
|
| 100 |
+
("b", 2),
|
| 101 |
+
]
|
| 102 |
+
),
|
| 103 |
+
dtype=object,
|
| 104 |
+
),
|
| 105 |
+
"B": 1,
|
| 106 |
+
}
|
| 107 |
+
)
|
| 108 |
+
tm.assert_frame_equal(result, expected)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_multi_index_columns():
|
| 112 |
+
df = pd.DataFrame(
|
| 113 |
+
{("A", 1): np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), ("A", 2): 1}
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
result = df.explode(("A", 1))
|
| 117 |
+
expected = pd.DataFrame(
|
| 118 |
+
{
|
| 119 |
+
("A", 1): pd.Series(
|
| 120 |
+
[0, 1, 2, np.nan, np.nan, 3, 4],
|
| 121 |
+
index=pd.Index([0, 0, 0, 1, 2, 3, 3]),
|
| 122 |
+
dtype=object,
|
| 123 |
+
),
|
| 124 |
+
("A", 2): 1,
|
| 125 |
+
}
|
| 126 |
+
)
|
| 127 |
+
tm.assert_frame_equal(result, expected)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def test_usecase():
|
| 131 |
+
# explode a single column
|
| 132 |
+
# gh-10511
|
| 133 |
+
df = pd.DataFrame(
|
| 134 |
+
[[11, range(5), 10], [22, range(3), 20]], columns=list("ABC")
|
| 135 |
+
).set_index("C")
|
| 136 |
+
result = df.explode("B")
|
| 137 |
+
|
| 138 |
+
expected = pd.DataFrame(
|
| 139 |
+
{
|
| 140 |
+
"A": [11, 11, 11, 11, 11, 22, 22, 22],
|
| 141 |
+
"B": np.array([0, 1, 2, 3, 4, 0, 1, 2], dtype=object),
|
| 142 |
+
"C": [10, 10, 10, 10, 10, 20, 20, 20],
|
| 143 |
+
},
|
| 144 |
+
columns=list("ABC"),
|
| 145 |
+
).set_index("C")
|
| 146 |
+
|
| 147 |
+
tm.assert_frame_equal(result, expected)
|
| 148 |
+
|
| 149 |
+
# gh-8517
|
| 150 |
+
df = pd.DataFrame(
|
| 151 |
+
[["2014-01-01", "Alice", "A B"], ["2014-01-02", "Bob", "C D"]],
|
| 152 |
+
columns=["dt", "name", "text"],
|
| 153 |
+
)
|
| 154 |
+
result = df.assign(text=df.text.str.split(" ")).explode("text")
|
| 155 |
+
expected = pd.DataFrame(
|
| 156 |
+
[
|
| 157 |
+
["2014-01-01", "Alice", "A"],
|
| 158 |
+
["2014-01-01", "Alice", "B"],
|
| 159 |
+
["2014-01-02", "Bob", "C"],
|
| 160 |
+
["2014-01-02", "Bob", "D"],
|
| 161 |
+
],
|
| 162 |
+
columns=["dt", "name", "text"],
|
| 163 |
+
index=[0, 0, 1, 1],
|
| 164 |
+
)
|
| 165 |
+
tm.assert_frame_equal(result, expected)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@pytest.mark.parametrize(
|
| 169 |
+
"input_dict, input_index, expected_dict, expected_index",
|
| 170 |
+
[
|
| 171 |
+
(
|
| 172 |
+
{"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
|
| 173 |
+
[0, 0],
|
| 174 |
+
{"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
|
| 175 |
+
[0, 0, 0, 0],
|
| 176 |
+
),
|
| 177 |
+
(
|
| 178 |
+
{"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
|
| 179 |
+
pd.Index([0, 0], name="my_index"),
|
| 180 |
+
{"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
|
| 181 |
+
pd.Index([0, 0, 0, 0], name="my_index"),
|
| 182 |
+
),
|
| 183 |
+
(
|
| 184 |
+
{"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
|
| 185 |
+
pd.MultiIndex.from_arrays(
|
| 186 |
+
[[0, 0], [1, 1]], names=["my_first_index", "my_second_index"]
|
| 187 |
+
),
|
| 188 |
+
{"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
|
| 189 |
+
pd.MultiIndex.from_arrays(
|
| 190 |
+
[[0, 0, 0, 0], [1, 1, 1, 1]],
|
| 191 |
+
names=["my_first_index", "my_second_index"],
|
| 192 |
+
),
|
| 193 |
+
),
|
| 194 |
+
(
|
| 195 |
+
{"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
|
| 196 |
+
pd.MultiIndex.from_arrays([[0, 0], [1, 1]], names=["my_index", None]),
|
| 197 |
+
{"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
|
| 198 |
+
pd.MultiIndex.from_arrays(
|
| 199 |
+
[[0, 0, 0, 0], [1, 1, 1, 1]], names=["my_index", None]
|
| 200 |
+
),
|
| 201 |
+
),
|
| 202 |
+
],
|
| 203 |
+
)
|
| 204 |
+
def test_duplicate_index(input_dict, input_index, expected_dict, expected_index):
|
| 205 |
+
# GH 28005
|
| 206 |
+
df = pd.DataFrame(input_dict, index=input_index)
|
| 207 |
+
result = df.explode("col1")
|
| 208 |
+
expected = pd.DataFrame(expected_dict, index=expected_index, dtype=object)
|
| 209 |
+
tm.assert_frame_equal(result, expected)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def test_ignore_index():
|
| 213 |
+
# GH 34932
|
| 214 |
+
df = pd.DataFrame({"id": range(0, 20, 10), "values": [list("ab"), list("cd")]})
|
| 215 |
+
result = df.explode("values", ignore_index=True)
|
| 216 |
+
expected = pd.DataFrame(
|
| 217 |
+
{"id": [0, 0, 10, 10], "values": list("abcd")}, index=[0, 1, 2, 3]
|
| 218 |
+
)
|
| 219 |
+
tm.assert_frame_equal(result, expected)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def test_explode_sets():
|
| 223 |
+
# https://github.com/pandas-dev/pandas/issues/35614
|
| 224 |
+
df = pd.DataFrame({"a": [{"x", "y"}], "b": [1]}, index=[1])
|
| 225 |
+
result = df.explode(column="a").sort_values(by="a")
|
| 226 |
+
expected = pd.DataFrame({"a": ["x", "y"], "b": [1, 1]}, index=[1, 1])
|
| 227 |
+
tm.assert_frame_equal(result, expected)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@pytest.mark.parametrize(
|
| 231 |
+
"input_subset, expected_dict, expected_index",
|
| 232 |
+
[
|
| 233 |
+
(
|
| 234 |
+
list("AC"),
|
| 235 |
+
{
|
| 236 |
+
"A": pd.Series(
|
| 237 |
+
[0, 1, 2, np.nan, np.nan, 3, 4, np.nan],
|
| 238 |
+
index=list("aaabcdde"),
|
| 239 |
+
dtype=object,
|
| 240 |
+
),
|
| 241 |
+
"B": 1,
|
| 242 |
+
"C": ["a", "b", "c", "foo", np.nan, "d", "e", np.nan],
|
| 243 |
+
},
|
| 244 |
+
list("aaabcdde"),
|
| 245 |
+
),
|
| 246 |
+
(
|
| 247 |
+
list("A"),
|
| 248 |
+
{
|
| 249 |
+
"A": pd.Series(
|
| 250 |
+
[0, 1, 2, np.nan, np.nan, 3, 4, np.nan],
|
| 251 |
+
index=list("aaabcdde"),
|
| 252 |
+
dtype=object,
|
| 253 |
+
),
|
| 254 |
+
"B": 1,
|
| 255 |
+
"C": [
|
| 256 |
+
["a", "b", "c"],
|
| 257 |
+
["a", "b", "c"],
|
| 258 |
+
["a", "b", "c"],
|
| 259 |
+
"foo",
|
| 260 |
+
[],
|
| 261 |
+
["d", "e"],
|
| 262 |
+
["d", "e"],
|
| 263 |
+
np.nan,
|
| 264 |
+
],
|
| 265 |
+
},
|
| 266 |
+
list("aaabcdde"),
|
| 267 |
+
),
|
| 268 |
+
],
|
| 269 |
+
)
|
| 270 |
+
def test_multi_columns(input_subset, expected_dict, expected_index):
|
| 271 |
+
# GH 39240
|
| 272 |
+
df = pd.DataFrame(
|
| 273 |
+
{
|
| 274 |
+
"A": [[0, 1, 2], np.nan, [], (3, 4), np.nan],
|
| 275 |
+
"B": 1,
|
| 276 |
+
"C": [["a", "b", "c"], "foo", [], ["d", "e"], np.nan],
|
| 277 |
+
},
|
| 278 |
+
index=list("abcde"),
|
| 279 |
+
)
|
| 280 |
+
result = df.explode(input_subset)
|
| 281 |
+
expected = pd.DataFrame(expected_dict, expected_index)
|
| 282 |
+
tm.assert_frame_equal(result, expected)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def test_multi_columns_nan_empty():
|
| 286 |
+
# GH 46084
|
| 287 |
+
df = pd.DataFrame(
|
| 288 |
+
{
|
| 289 |
+
"A": [[0, 1], [5], [], [2, 3]],
|
| 290 |
+
"B": [9, 8, 7, 6],
|
| 291 |
+
"C": [[1, 2], np.nan, [], [3, 4]],
|
| 292 |
+
}
|
| 293 |
+
)
|
| 294 |
+
result = df.explode(["A", "C"])
|
| 295 |
+
expected = pd.DataFrame(
|
| 296 |
+
{
|
| 297 |
+
"A": np.array([0, 1, 5, np.nan, 2, 3], dtype=object),
|
| 298 |
+
"B": [9, 9, 8, 7, 6, 6],
|
| 299 |
+
"C": np.array([1, 2, np.nan, np.nan, 3, 4], dtype=object),
|
| 300 |
+
},
|
| 301 |
+
index=[0, 0, 1, 2, 3, 3],
|
| 302 |
+
)
|
| 303 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_fillna.py
ADDED
|
@@ -0,0 +1,778 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
Categorical,
|
| 8 |
+
DataFrame,
|
| 9 |
+
DatetimeIndex,
|
| 10 |
+
NaT,
|
| 11 |
+
PeriodIndex,
|
| 12 |
+
Series,
|
| 13 |
+
TimedeltaIndex,
|
| 14 |
+
Timestamp,
|
| 15 |
+
date_range,
|
| 16 |
+
to_datetime,
|
| 17 |
+
)
|
| 18 |
+
import pandas._testing as tm
|
| 19 |
+
from pandas.tests.frame.common import _check_mixed_float
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class TestFillNA:
|
| 23 |
+
def test_fillna_dict_inplace_nonunique_columns(self, using_copy_on_write):
|
| 24 |
+
df = DataFrame(
|
| 25 |
+
{"A": [np.nan] * 3, "B": [NaT, Timestamp(1), NaT], "C": [np.nan, "foo", 2]}
|
| 26 |
+
)
|
| 27 |
+
df.columns = ["A", "A", "A"]
|
| 28 |
+
orig = df[:]
|
| 29 |
+
|
| 30 |
+
df.fillna({"A": 2}, inplace=True)
|
| 31 |
+
# The first and third columns can be set inplace, while the second cannot.
|
| 32 |
+
|
| 33 |
+
expected = DataFrame(
|
| 34 |
+
{"A": [2.0] * 3, "B": [2, Timestamp(1), 2], "C": [2, "foo", 2]}
|
| 35 |
+
)
|
| 36 |
+
expected.columns = ["A", "A", "A"]
|
| 37 |
+
tm.assert_frame_equal(df, expected)
|
| 38 |
+
|
| 39 |
+
# TODO: what's the expected/desired behavior with CoW?
|
| 40 |
+
if not using_copy_on_write:
|
| 41 |
+
assert tm.shares_memory(df.iloc[:, 0], orig.iloc[:, 0])
|
| 42 |
+
assert not tm.shares_memory(df.iloc[:, 1], orig.iloc[:, 1])
|
| 43 |
+
if not using_copy_on_write:
|
| 44 |
+
assert tm.shares_memory(df.iloc[:, 2], orig.iloc[:, 2])
|
| 45 |
+
|
| 46 |
+
@td.skip_array_manager_not_yet_implemented
|
| 47 |
+
def test_fillna_on_column_view(self, using_copy_on_write):
|
| 48 |
+
# GH#46149 avoid unnecessary copies
|
| 49 |
+
arr = np.full((40, 50), np.nan)
|
| 50 |
+
df = DataFrame(arr, copy=False)
|
| 51 |
+
|
| 52 |
+
# TODO(CoW): This should raise a chained assignment error
|
| 53 |
+
df[0].fillna(-1, inplace=True)
|
| 54 |
+
if using_copy_on_write:
|
| 55 |
+
assert np.isnan(arr[:, 0]).all()
|
| 56 |
+
else:
|
| 57 |
+
assert (arr[:, 0] == -1).all()
|
| 58 |
+
|
| 59 |
+
# i.e. we didn't create a new 49-column block
|
| 60 |
+
assert len(df._mgr.arrays) == 1
|
| 61 |
+
assert np.shares_memory(df.values, arr)
|
| 62 |
+
|
| 63 |
+
def test_fillna_datetime(self, datetime_frame):
|
| 64 |
+
tf = datetime_frame
|
| 65 |
+
tf.loc[tf.index[:5], "A"] = np.nan
|
| 66 |
+
tf.loc[tf.index[-5:], "A"] = np.nan
|
| 67 |
+
|
| 68 |
+
zero_filled = datetime_frame.fillna(0)
|
| 69 |
+
assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all()
|
| 70 |
+
|
| 71 |
+
padded = datetime_frame.fillna(method="pad")
|
| 72 |
+
assert np.isnan(padded.loc[padded.index[:5], "A"]).all()
|
| 73 |
+
assert (
|
| 74 |
+
padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"]
|
| 75 |
+
).all()
|
| 76 |
+
|
| 77 |
+
msg = "Must specify a fill 'value' or 'method'"
|
| 78 |
+
with pytest.raises(ValueError, match=msg):
|
| 79 |
+
datetime_frame.fillna()
|
| 80 |
+
msg = "Cannot specify both 'value' and 'method'"
|
| 81 |
+
with pytest.raises(ValueError, match=msg):
|
| 82 |
+
datetime_frame.fillna(5, method="ffill")
|
| 83 |
+
|
| 84 |
+
def test_fillna_mixed_type(self, float_string_frame):
|
| 85 |
+
mf = float_string_frame
|
| 86 |
+
mf.loc[mf.index[5:20], "foo"] = np.nan
|
| 87 |
+
mf.loc[mf.index[-10:], "A"] = np.nan
|
| 88 |
+
# TODO: make stronger assertion here, GH 25640
|
| 89 |
+
mf.fillna(value=0)
|
| 90 |
+
mf.fillna(method="pad")
|
| 91 |
+
|
| 92 |
+
def test_fillna_mixed_float(self, mixed_float_frame):
|
| 93 |
+
# mixed numeric (but no float16)
|
| 94 |
+
mf = mixed_float_frame.reindex(columns=["A", "B", "D"])
|
| 95 |
+
mf.loc[mf.index[-10:], "A"] = np.nan
|
| 96 |
+
result = mf.fillna(value=0)
|
| 97 |
+
_check_mixed_float(result, dtype={"C": None})
|
| 98 |
+
|
| 99 |
+
result = mf.fillna(method="pad")
|
| 100 |
+
_check_mixed_float(result, dtype={"C": None})
|
| 101 |
+
|
| 102 |
+
def test_fillna_empty(self):
|
| 103 |
+
# empty frame (GH#2778)
|
| 104 |
+
df = DataFrame(columns=["x"])
|
| 105 |
+
for m in ["pad", "backfill"]:
|
| 106 |
+
df.x.fillna(method=m, inplace=True)
|
| 107 |
+
df.x.fillna(method=m)
|
| 108 |
+
|
| 109 |
+
def test_fillna_different_dtype(self):
|
| 110 |
+
# with different dtype (GH#3386)
|
| 111 |
+
df = DataFrame(
|
| 112 |
+
[["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]]
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
result = df.fillna({2: "foo"})
|
| 116 |
+
expected = DataFrame(
|
| 117 |
+
[["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]]
|
| 118 |
+
)
|
| 119 |
+
tm.assert_frame_equal(result, expected)
|
| 120 |
+
|
| 121 |
+
return_value = df.fillna({2: "foo"}, inplace=True)
|
| 122 |
+
tm.assert_frame_equal(df, expected)
|
| 123 |
+
assert return_value is None
|
| 124 |
+
|
| 125 |
+
def test_fillna_limit_and_value(self):
|
| 126 |
+
# limit and value
|
| 127 |
+
df = DataFrame(np.random.randn(10, 3))
|
| 128 |
+
df.iloc[2:7, 0] = np.nan
|
| 129 |
+
df.iloc[3:5, 2] = np.nan
|
| 130 |
+
|
| 131 |
+
expected = df.copy()
|
| 132 |
+
expected.iloc[2, 0] = 999
|
| 133 |
+
expected.iloc[3, 2] = 999
|
| 134 |
+
result = df.fillna(999, limit=1)
|
| 135 |
+
tm.assert_frame_equal(result, expected)
|
| 136 |
+
|
| 137 |
+
def test_fillna_datelike(self):
|
| 138 |
+
# with datelike
|
| 139 |
+
# GH#6344
|
| 140 |
+
df = DataFrame(
|
| 141 |
+
{
|
| 142 |
+
"Date": [NaT, Timestamp("2014-1-1")],
|
| 143 |
+
"Date2": [Timestamp("2013-1-1"), NaT],
|
| 144 |
+
}
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
expected = df.copy()
|
| 148 |
+
expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"])
|
| 149 |
+
result = df.fillna(value={"Date": df["Date2"]})
|
| 150 |
+
tm.assert_frame_equal(result, expected)
|
| 151 |
+
|
| 152 |
+
def test_fillna_tzaware(self):
|
| 153 |
+
# with timezone
|
| 154 |
+
# GH#15855
|
| 155 |
+
df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]})
|
| 156 |
+
exp = DataFrame(
|
| 157 |
+
{
|
| 158 |
+
"A": [
|
| 159 |
+
Timestamp("2012-11-11 00:00:00+01:00"),
|
| 160 |
+
Timestamp("2012-11-11 00:00:00+01:00"),
|
| 161 |
+
]
|
| 162 |
+
}
|
| 163 |
+
)
|
| 164 |
+
tm.assert_frame_equal(df.fillna(method="pad"), exp)
|
| 165 |
+
|
| 166 |
+
df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]})
|
| 167 |
+
exp = DataFrame(
|
| 168 |
+
{
|
| 169 |
+
"A": [
|
| 170 |
+
Timestamp("2012-11-11 00:00:00+01:00"),
|
| 171 |
+
Timestamp("2012-11-11 00:00:00+01:00"),
|
| 172 |
+
]
|
| 173 |
+
}
|
| 174 |
+
)
|
| 175 |
+
tm.assert_frame_equal(df.fillna(method="bfill"), exp)
|
| 176 |
+
|
| 177 |
+
def test_fillna_tzaware_different_column(self):
|
| 178 |
+
# with timezone in another column
|
| 179 |
+
# GH#15522
|
| 180 |
+
df = DataFrame(
|
| 181 |
+
{
|
| 182 |
+
"A": date_range("20130101", periods=4, tz="US/Eastern"),
|
| 183 |
+
"B": [1, 2, np.nan, np.nan],
|
| 184 |
+
}
|
| 185 |
+
)
|
| 186 |
+
result = df.fillna(method="pad")
|
| 187 |
+
expected = DataFrame(
|
| 188 |
+
{
|
| 189 |
+
"A": date_range("20130101", periods=4, tz="US/Eastern"),
|
| 190 |
+
"B": [1.0, 2.0, 2.0, 2.0],
|
| 191 |
+
}
|
| 192 |
+
)
|
| 193 |
+
tm.assert_frame_equal(result, expected)
|
| 194 |
+
|
| 195 |
+
def test_na_actions_categorical(self):
|
| 196 |
+
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
|
| 197 |
+
vals = ["a", "b", np.nan, "d"]
|
| 198 |
+
df = DataFrame({"cats": cat, "vals": vals})
|
| 199 |
+
cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3])
|
| 200 |
+
vals2 = ["a", "b", "b", "d"]
|
| 201 |
+
df_exp_fill = DataFrame({"cats": cat2, "vals": vals2})
|
| 202 |
+
cat3 = Categorical([1, 2, 3], categories=[1, 2, 3])
|
| 203 |
+
vals3 = ["a", "b", np.nan]
|
| 204 |
+
df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3})
|
| 205 |
+
cat4 = Categorical([1, 2], categories=[1, 2, 3])
|
| 206 |
+
vals4 = ["a", "b"]
|
| 207 |
+
df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4})
|
| 208 |
+
|
| 209 |
+
# fillna
|
| 210 |
+
res = df.fillna(value={"cats": 3, "vals": "b"})
|
| 211 |
+
tm.assert_frame_equal(res, df_exp_fill)
|
| 212 |
+
|
| 213 |
+
msg = "Cannot setitem on a Categorical with a new category"
|
| 214 |
+
with pytest.raises(TypeError, match=msg):
|
| 215 |
+
df.fillna(value={"cats": 4, "vals": "c"})
|
| 216 |
+
|
| 217 |
+
res = df.fillna(method="pad")
|
| 218 |
+
tm.assert_frame_equal(res, df_exp_fill)
|
| 219 |
+
|
| 220 |
+
# dropna
|
| 221 |
+
res = df.dropna(subset=["cats"])
|
| 222 |
+
tm.assert_frame_equal(res, df_exp_drop_cats)
|
| 223 |
+
|
| 224 |
+
res = df.dropna()
|
| 225 |
+
tm.assert_frame_equal(res, df_exp_drop_all)
|
| 226 |
+
|
| 227 |
+
# make sure that fillna takes missing values into account
|
| 228 |
+
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
|
| 229 |
+
df = DataFrame({"cats": c, "vals": [1, 2, 3]})
|
| 230 |
+
|
| 231 |
+
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
|
| 232 |
+
df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
|
| 233 |
+
|
| 234 |
+
res = df.fillna("a")
|
| 235 |
+
tm.assert_frame_equal(res, df_exp)
|
| 236 |
+
|
| 237 |
+
def test_fillna_categorical_nan(self):
|
| 238 |
+
# GH#14021
|
| 239 |
+
# np.nan should always be a valid filler
|
| 240 |
+
cat = Categorical([np.nan, 2, np.nan])
|
| 241 |
+
val = Categorical([np.nan, np.nan, np.nan])
|
| 242 |
+
df = DataFrame({"cats": cat, "vals": val})
|
| 243 |
+
|
| 244 |
+
# GH#32950 df.median() is poorly behaved because there is no
|
| 245 |
+
# Categorical.median
|
| 246 |
+
median = Series({"cats": 2.0, "vals": np.nan})
|
| 247 |
+
|
| 248 |
+
res = df.fillna(median)
|
| 249 |
+
v_exp = [np.nan, np.nan, np.nan]
|
| 250 |
+
df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype="category")
|
| 251 |
+
tm.assert_frame_equal(res, df_exp)
|
| 252 |
+
|
| 253 |
+
result = df.cats.fillna(np.nan)
|
| 254 |
+
tm.assert_series_equal(result, df.cats)
|
| 255 |
+
|
| 256 |
+
result = df.vals.fillna(np.nan)
|
| 257 |
+
tm.assert_series_equal(result, df.vals)
|
| 258 |
+
|
| 259 |
+
idx = DatetimeIndex(
|
| 260 |
+
["2011-01-01 09:00", "2016-01-01 23:45", "2011-01-01 09:00", NaT, NaT]
|
| 261 |
+
)
|
| 262 |
+
df = DataFrame({"a": Categorical(idx)})
|
| 263 |
+
tm.assert_frame_equal(df.fillna(value=NaT), df)
|
| 264 |
+
|
| 265 |
+
idx = PeriodIndex(["2011-01", "2011-01", "2011-01", NaT, NaT], freq="M")
|
| 266 |
+
df = DataFrame({"a": Categorical(idx)})
|
| 267 |
+
tm.assert_frame_equal(df.fillna(value=NaT), df)
|
| 268 |
+
|
| 269 |
+
idx = TimedeltaIndex(["1 days", "2 days", "1 days", NaT, NaT])
|
| 270 |
+
df = DataFrame({"a": Categorical(idx)})
|
| 271 |
+
tm.assert_frame_equal(df.fillna(value=NaT), df)
|
| 272 |
+
|
| 273 |
+
def test_fillna_downcast(self):
|
| 274 |
+
# GH#15277
|
| 275 |
+
# infer int64 from float64
|
| 276 |
+
df = DataFrame({"a": [1.0, np.nan]})
|
| 277 |
+
result = df.fillna(0, downcast="infer")
|
| 278 |
+
expected = DataFrame({"a": [1, 0]})
|
| 279 |
+
tm.assert_frame_equal(result, expected)
|
| 280 |
+
|
| 281 |
+
# infer int64 from float64 when fillna value is a dict
|
| 282 |
+
df = DataFrame({"a": [1.0, np.nan]})
|
| 283 |
+
result = df.fillna({"a": 0}, downcast="infer")
|
| 284 |
+
expected = DataFrame({"a": [1, 0]})
|
| 285 |
+
tm.assert_frame_equal(result, expected)
|
| 286 |
+
|
| 287 |
+
def test_fillna_downcast_false(self, frame_or_series):
|
| 288 |
+
# GH#45603 preserve object dtype with downcast=False
|
| 289 |
+
obj = frame_or_series([1, 2, 3], dtype="object")
|
| 290 |
+
result = obj.fillna("", downcast=False)
|
| 291 |
+
tm.assert_equal(result, obj)
|
| 292 |
+
|
| 293 |
+
def test_fillna_downcast_noop(self, frame_or_series):
|
| 294 |
+
# GH#45423
|
| 295 |
+
# Two relevant paths:
|
| 296 |
+
# 1) not _can_hold_na (e.g. integer)
|
| 297 |
+
# 2) _can_hold_na + noop + not can_hold_element
|
| 298 |
+
|
| 299 |
+
obj = frame_or_series([1, 2, 3], dtype=np.int64)
|
| 300 |
+
res = obj.fillna("foo", downcast=np.dtype(np.int32))
|
| 301 |
+
expected = obj.astype(np.int32)
|
| 302 |
+
tm.assert_equal(res, expected)
|
| 303 |
+
|
| 304 |
+
obj2 = obj.astype(np.float64)
|
| 305 |
+
res2 = obj2.fillna("foo", downcast="infer")
|
| 306 |
+
expected2 = obj # get back int64
|
| 307 |
+
tm.assert_equal(res2, expected2)
|
| 308 |
+
|
| 309 |
+
res3 = obj2.fillna("foo", downcast=np.dtype(np.int32))
|
| 310 |
+
tm.assert_equal(res3, expected)
|
| 311 |
+
|
| 312 |
+
@pytest.mark.parametrize("columns", [["A", "A", "B"], ["A", "A"]])
|
| 313 |
+
def test_fillna_dictlike_value_duplicate_colnames(self, columns):
|
| 314 |
+
# GH#43476
|
| 315 |
+
df = DataFrame(np.nan, index=[0, 1], columns=columns)
|
| 316 |
+
with tm.assert_produces_warning(None):
|
| 317 |
+
result = df.fillna({"A": 0})
|
| 318 |
+
|
| 319 |
+
expected = df.copy()
|
| 320 |
+
expected["A"] = 0.0
|
| 321 |
+
tm.assert_frame_equal(result, expected)
|
| 322 |
+
|
| 323 |
+
def test_fillna_dtype_conversion(self):
|
| 324 |
+
# make sure that fillna on an empty frame works
|
| 325 |
+
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
|
| 326 |
+
result = df.dtypes
|
| 327 |
+
expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5])
|
| 328 |
+
tm.assert_series_equal(result, expected)
|
| 329 |
+
|
| 330 |
+
result = df.fillna(1)
|
| 331 |
+
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
|
| 332 |
+
tm.assert_frame_equal(result, expected)
|
| 333 |
+
|
| 334 |
+
# empty block
|
| 335 |
+
df = DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
|
| 336 |
+
result = df.fillna("nan")
|
| 337 |
+
expected = DataFrame("nan", index=range(3), columns=["A", "B"])
|
| 338 |
+
tm.assert_frame_equal(result, expected)
|
| 339 |
+
|
| 340 |
+
@pytest.mark.parametrize("val", ["", 1, np.nan, 1.0])
|
| 341 |
+
def test_fillna_dtype_conversion_equiv_replace(self, val):
|
| 342 |
+
df = DataFrame({"A": [1, np.nan], "B": [1.0, 2.0]})
|
| 343 |
+
expected = df.replace(np.nan, val)
|
| 344 |
+
result = df.fillna(val)
|
| 345 |
+
tm.assert_frame_equal(result, expected)
|
| 346 |
+
|
| 347 |
+
def test_fillna_datetime_columns(self):
|
| 348 |
+
# GH#7095
|
| 349 |
+
df = DataFrame(
|
| 350 |
+
{
|
| 351 |
+
"A": [-1, -2, np.nan],
|
| 352 |
+
"B": date_range("20130101", periods=3),
|
| 353 |
+
"C": ["foo", "bar", None],
|
| 354 |
+
"D": ["foo2", "bar2", None],
|
| 355 |
+
},
|
| 356 |
+
index=date_range("20130110", periods=3),
|
| 357 |
+
)
|
| 358 |
+
result = df.fillna("?")
|
| 359 |
+
expected = DataFrame(
|
| 360 |
+
{
|
| 361 |
+
"A": [-1, -2, "?"],
|
| 362 |
+
"B": date_range("20130101", periods=3),
|
| 363 |
+
"C": ["foo", "bar", "?"],
|
| 364 |
+
"D": ["foo2", "bar2", "?"],
|
| 365 |
+
},
|
| 366 |
+
index=date_range("20130110", periods=3),
|
| 367 |
+
)
|
| 368 |
+
tm.assert_frame_equal(result, expected)
|
| 369 |
+
|
| 370 |
+
df = DataFrame(
|
| 371 |
+
{
|
| 372 |
+
"A": [-1, -2, np.nan],
|
| 373 |
+
"B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), NaT],
|
| 374 |
+
"C": ["foo", "bar", None],
|
| 375 |
+
"D": ["foo2", "bar2", None],
|
| 376 |
+
},
|
| 377 |
+
index=date_range("20130110", periods=3),
|
| 378 |
+
)
|
| 379 |
+
result = df.fillna("?")
|
| 380 |
+
expected = DataFrame(
|
| 381 |
+
{
|
| 382 |
+
"A": [-1, -2, "?"],
|
| 383 |
+
"B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), "?"],
|
| 384 |
+
"C": ["foo", "bar", "?"],
|
| 385 |
+
"D": ["foo2", "bar2", "?"],
|
| 386 |
+
},
|
| 387 |
+
index=date_range("20130110", periods=3),
|
| 388 |
+
)
|
| 389 |
+
tm.assert_frame_equal(result, expected)
|
| 390 |
+
|
| 391 |
+
def test_ffill(self, datetime_frame):
|
| 392 |
+
datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
|
| 393 |
+
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
|
| 394 |
+
|
| 395 |
+
tm.assert_frame_equal(
|
| 396 |
+
datetime_frame.ffill(), datetime_frame.fillna(method="ffill")
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
def test_bfill(self, datetime_frame):
|
| 400 |
+
datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
|
| 401 |
+
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
|
| 402 |
+
|
| 403 |
+
tm.assert_frame_equal(
|
| 404 |
+
datetime_frame.bfill(), datetime_frame.fillna(method="bfill")
|
| 405 |
+
)
|
| 406 |
+
|
| 407 |
+
def test_frame_pad_backfill_limit(self):
|
| 408 |
+
index = np.arange(10)
|
| 409 |
+
df = DataFrame(np.random.randn(10, 4), index=index)
|
| 410 |
+
|
| 411 |
+
result = df[:2].reindex(index, method="pad", limit=5)
|
| 412 |
+
|
| 413 |
+
expected = df[:2].reindex(index).fillna(method="pad")
|
| 414 |
+
expected.iloc[-3:] = np.nan
|
| 415 |
+
tm.assert_frame_equal(result, expected)
|
| 416 |
+
|
| 417 |
+
result = df[-2:].reindex(index, method="backfill", limit=5)
|
| 418 |
+
|
| 419 |
+
expected = df[-2:].reindex(index).fillna(method="backfill")
|
| 420 |
+
expected.iloc[:3] = np.nan
|
| 421 |
+
tm.assert_frame_equal(result, expected)
|
| 422 |
+
|
| 423 |
+
def test_frame_fillna_limit(self):
|
| 424 |
+
index = np.arange(10)
|
| 425 |
+
df = DataFrame(np.random.randn(10, 4), index=index)
|
| 426 |
+
|
| 427 |
+
result = df[:2].reindex(index)
|
| 428 |
+
result = result.fillna(method="pad", limit=5)
|
| 429 |
+
|
| 430 |
+
expected = df[:2].reindex(index).fillna(method="pad")
|
| 431 |
+
expected.iloc[-3:] = np.nan
|
| 432 |
+
tm.assert_frame_equal(result, expected)
|
| 433 |
+
|
| 434 |
+
result = df[-2:].reindex(index)
|
| 435 |
+
result = result.fillna(method="backfill", limit=5)
|
| 436 |
+
|
| 437 |
+
expected = df[-2:].reindex(index).fillna(method="backfill")
|
| 438 |
+
expected.iloc[:3] = np.nan
|
| 439 |
+
tm.assert_frame_equal(result, expected)
|
| 440 |
+
|
| 441 |
+
def test_fillna_skip_certain_blocks(self):
|
| 442 |
+
# don't try to fill boolean, int blocks
|
| 443 |
+
|
| 444 |
+
df = DataFrame(np.random.randn(10, 4).astype(int))
|
| 445 |
+
|
| 446 |
+
# it works!
|
| 447 |
+
df.fillna(np.nan)
|
| 448 |
+
|
| 449 |
+
@pytest.mark.parametrize("type", [int, float])
|
| 450 |
+
def test_fillna_positive_limit(self, type):
|
| 451 |
+
df = DataFrame(np.random.randn(10, 4)).astype(type)
|
| 452 |
+
|
| 453 |
+
msg = "Limit must be greater than 0"
|
| 454 |
+
with pytest.raises(ValueError, match=msg):
|
| 455 |
+
df.fillna(0, limit=-5)
|
| 456 |
+
|
| 457 |
+
@pytest.mark.parametrize("type", [int, float])
|
| 458 |
+
def test_fillna_integer_limit(self, type):
|
| 459 |
+
df = DataFrame(np.random.randn(10, 4)).astype(type)
|
| 460 |
+
|
| 461 |
+
msg = "Limit must be an integer"
|
| 462 |
+
with pytest.raises(ValueError, match=msg):
|
| 463 |
+
df.fillna(0, limit=0.5)
|
| 464 |
+
|
| 465 |
+
def test_fillna_inplace(self):
|
| 466 |
+
df = DataFrame(np.random.randn(10, 4))
|
| 467 |
+
df.loc[:4, 1] = np.nan
|
| 468 |
+
df.loc[-4:, 3] = np.nan
|
| 469 |
+
|
| 470 |
+
expected = df.fillna(value=0)
|
| 471 |
+
assert expected is not df
|
| 472 |
+
|
| 473 |
+
df.fillna(value=0, inplace=True)
|
| 474 |
+
tm.assert_frame_equal(df, expected)
|
| 475 |
+
|
| 476 |
+
expected = df.fillna(value={0: 0}, inplace=True)
|
| 477 |
+
assert expected is None
|
| 478 |
+
|
| 479 |
+
df.loc[:4, 1] = np.nan
|
| 480 |
+
df.loc[-4:, 3] = np.nan
|
| 481 |
+
expected = df.fillna(method="ffill")
|
| 482 |
+
assert expected is not df
|
| 483 |
+
|
| 484 |
+
df.fillna(method="ffill", inplace=True)
|
| 485 |
+
tm.assert_frame_equal(df, expected)
|
| 486 |
+
|
| 487 |
+
def test_fillna_dict_series(self):
|
| 488 |
+
df = DataFrame(
|
| 489 |
+
{
|
| 490 |
+
"a": [np.nan, 1, 2, np.nan, np.nan],
|
| 491 |
+
"b": [1, 2, 3, np.nan, np.nan],
|
| 492 |
+
"c": [np.nan, 1, 2, 3, 4],
|
| 493 |
+
}
|
| 494 |
+
)
|
| 495 |
+
|
| 496 |
+
result = df.fillna({"a": 0, "b": 5})
|
| 497 |
+
|
| 498 |
+
expected = df.copy()
|
| 499 |
+
expected["a"] = expected["a"].fillna(0)
|
| 500 |
+
expected["b"] = expected["b"].fillna(5)
|
| 501 |
+
tm.assert_frame_equal(result, expected)
|
| 502 |
+
|
| 503 |
+
# it works
|
| 504 |
+
result = df.fillna({"a": 0, "b": 5, "d": 7})
|
| 505 |
+
|
| 506 |
+
# Series treated same as dict
|
| 507 |
+
result = df.fillna(df.max())
|
| 508 |
+
expected = df.fillna(df.max().to_dict())
|
| 509 |
+
tm.assert_frame_equal(result, expected)
|
| 510 |
+
|
| 511 |
+
# disable this for now
|
| 512 |
+
with pytest.raises(NotImplementedError, match="column by column"):
|
| 513 |
+
df.fillna(df.max(1), axis=1)
|
| 514 |
+
|
| 515 |
+
def test_fillna_dataframe(self):
|
| 516 |
+
# GH#8377
|
| 517 |
+
df = DataFrame(
|
| 518 |
+
{
|
| 519 |
+
"a": [np.nan, 1, 2, np.nan, np.nan],
|
| 520 |
+
"b": [1, 2, 3, np.nan, np.nan],
|
| 521 |
+
"c": [np.nan, 1, 2, 3, 4],
|
| 522 |
+
},
|
| 523 |
+
index=list("VWXYZ"),
|
| 524 |
+
)
|
| 525 |
+
|
| 526 |
+
# df2 may have different index and columns
|
| 527 |
+
df2 = DataFrame(
|
| 528 |
+
{
|
| 529 |
+
"a": [np.nan, 10, 20, 30, 40],
|
| 530 |
+
"b": [50, 60, 70, 80, 90],
|
| 531 |
+
"foo": ["bar"] * 5,
|
| 532 |
+
},
|
| 533 |
+
index=list("VWXuZ"),
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
result = df.fillna(df2)
|
| 537 |
+
|
| 538 |
+
# only those columns and indices which are shared get filled
|
| 539 |
+
expected = DataFrame(
|
| 540 |
+
{
|
| 541 |
+
"a": [np.nan, 1, 2, np.nan, 40],
|
| 542 |
+
"b": [1, 2, 3, np.nan, 90],
|
| 543 |
+
"c": [np.nan, 1, 2, 3, 4],
|
| 544 |
+
},
|
| 545 |
+
index=list("VWXYZ"),
|
| 546 |
+
)
|
| 547 |
+
|
| 548 |
+
tm.assert_frame_equal(result, expected)
|
| 549 |
+
|
| 550 |
+
def test_fillna_columns(self):
|
| 551 |
+
arr = np.random.randn(10, 10)
|
| 552 |
+
arr[:, ::2] = np.nan
|
| 553 |
+
df = DataFrame(arr)
|
| 554 |
+
|
| 555 |
+
result = df.fillna(method="ffill", axis=1)
|
| 556 |
+
expected = df.T.fillna(method="pad").T
|
| 557 |
+
tm.assert_frame_equal(result, expected)
|
| 558 |
+
|
| 559 |
+
df.insert(6, "foo", 5)
|
| 560 |
+
result = df.fillna(method="ffill", axis=1)
|
| 561 |
+
expected = df.astype(float).fillna(method="ffill", axis=1)
|
| 562 |
+
tm.assert_frame_equal(result, expected)
|
| 563 |
+
|
| 564 |
+
def test_fillna_invalid_method(self, float_frame):
|
| 565 |
+
with pytest.raises(ValueError, match="ffil"):
|
| 566 |
+
float_frame.fillna(method="ffil")
|
| 567 |
+
|
| 568 |
+
def test_fillna_invalid_value(self, float_frame):
|
| 569 |
+
# list
|
| 570 |
+
msg = '"value" parameter must be a scalar or dict, but you passed a "{}"'
|
| 571 |
+
with pytest.raises(TypeError, match=msg.format("list")):
|
| 572 |
+
float_frame.fillna([1, 2])
|
| 573 |
+
# tuple
|
| 574 |
+
with pytest.raises(TypeError, match=msg.format("tuple")):
|
| 575 |
+
float_frame.fillna((1, 2))
|
| 576 |
+
# frame with series
|
| 577 |
+
msg = (
|
| 578 |
+
'"value" parameter must be a scalar, dict or Series, but you '
|
| 579 |
+
'passed a "DataFrame"'
|
| 580 |
+
)
|
| 581 |
+
with pytest.raises(TypeError, match=msg):
|
| 582 |
+
float_frame.iloc[:, 0].fillna(float_frame)
|
| 583 |
+
|
| 584 |
+
def test_fillna_col_reordering(self):
|
| 585 |
+
cols = ["COL." + str(i) for i in range(5, 0, -1)]
|
| 586 |
+
data = np.random.rand(20, 5)
|
| 587 |
+
df = DataFrame(index=range(20), columns=cols, data=data)
|
| 588 |
+
filled = df.fillna(method="ffill")
|
| 589 |
+
assert df.columns.tolist() == filled.columns.tolist()
|
| 590 |
+
|
| 591 |
+
def test_fill_corner(self, float_frame, float_string_frame):
|
| 592 |
+
mf = float_string_frame
|
| 593 |
+
mf.loc[mf.index[5:20], "foo"] = np.nan
|
| 594 |
+
mf.loc[mf.index[-10:], "A"] = np.nan
|
| 595 |
+
|
| 596 |
+
filled = float_string_frame.fillna(value=0)
|
| 597 |
+
assert (filled.loc[filled.index[5:20], "foo"] == 0).all()
|
| 598 |
+
del float_string_frame["foo"]
|
| 599 |
+
|
| 600 |
+
empty_float = float_frame.reindex(columns=[])
|
| 601 |
+
|
| 602 |
+
# TODO(wesm): unused?
|
| 603 |
+
result = empty_float.fillna(value=0) # noqa
|
| 604 |
+
|
| 605 |
+
def test_fillna_downcast_dict(self):
|
| 606 |
+
# GH#40809
|
| 607 |
+
df = DataFrame({"col1": [1, np.nan]})
|
| 608 |
+
result = df.fillna({"col1": 2}, downcast={"col1": "int64"})
|
| 609 |
+
expected = DataFrame({"col1": [1, 2]})
|
| 610 |
+
tm.assert_frame_equal(result, expected)
|
| 611 |
+
|
| 612 |
+
def test_fillna_with_columns_and_limit(self):
|
| 613 |
+
# GH40989
|
| 614 |
+
df = DataFrame(
|
| 615 |
+
[
|
| 616 |
+
[np.nan, 2, np.nan, 0],
|
| 617 |
+
[3, 4, np.nan, 1],
|
| 618 |
+
[np.nan, np.nan, np.nan, 5],
|
| 619 |
+
[np.nan, 3, np.nan, 4],
|
| 620 |
+
],
|
| 621 |
+
columns=list("ABCD"),
|
| 622 |
+
)
|
| 623 |
+
result = df.fillna(axis=1, value=100, limit=1)
|
| 624 |
+
result2 = df.fillna(axis=1, value=100, limit=2)
|
| 625 |
+
|
| 626 |
+
expected = DataFrame(
|
| 627 |
+
{
|
| 628 |
+
"A": Series([100, 3, 100, 100], dtype="float64"),
|
| 629 |
+
"B": [2, 4, np.nan, 3],
|
| 630 |
+
"C": [np.nan, 100, np.nan, np.nan],
|
| 631 |
+
"D": Series([0, 1, 5, 4], dtype="float64"),
|
| 632 |
+
},
|
| 633 |
+
index=[0, 1, 2, 3],
|
| 634 |
+
)
|
| 635 |
+
expected2 = DataFrame(
|
| 636 |
+
{
|
| 637 |
+
"A": Series([100, 3, 100, 100], dtype="float64"),
|
| 638 |
+
"B": Series([2, 4, 100, 3], dtype="float64"),
|
| 639 |
+
"C": [100, 100, np.nan, 100],
|
| 640 |
+
"D": Series([0, 1, 5, 4], dtype="float64"),
|
| 641 |
+
},
|
| 642 |
+
index=[0, 1, 2, 3],
|
| 643 |
+
)
|
| 644 |
+
|
| 645 |
+
tm.assert_frame_equal(result, expected)
|
| 646 |
+
tm.assert_frame_equal(result2, expected2)
|
| 647 |
+
|
| 648 |
+
def test_fillna_datetime_inplace(self):
|
| 649 |
+
# GH#48863
|
| 650 |
+
df = DataFrame(
|
| 651 |
+
{
|
| 652 |
+
"date1": to_datetime(["2018-05-30", None]),
|
| 653 |
+
"date2": to_datetime(["2018-09-30", None]),
|
| 654 |
+
}
|
| 655 |
+
)
|
| 656 |
+
expected = df.copy()
|
| 657 |
+
df.fillna(np.nan, inplace=True)
|
| 658 |
+
tm.assert_frame_equal(df, expected)
|
| 659 |
+
|
| 660 |
+
def test_fillna_inplace_with_columns_limit_and_value(self):
|
| 661 |
+
# GH40989
|
| 662 |
+
df = DataFrame(
|
| 663 |
+
[
|
| 664 |
+
[np.nan, 2, np.nan, 0],
|
| 665 |
+
[3, 4, np.nan, 1],
|
| 666 |
+
[np.nan, np.nan, np.nan, 5],
|
| 667 |
+
[np.nan, 3, np.nan, 4],
|
| 668 |
+
],
|
| 669 |
+
columns=list("ABCD"),
|
| 670 |
+
)
|
| 671 |
+
|
| 672 |
+
expected = df.fillna(axis=1, value=100, limit=1)
|
| 673 |
+
assert expected is not df
|
| 674 |
+
|
| 675 |
+
df.fillna(axis=1, value=100, limit=1, inplace=True)
|
| 676 |
+
tm.assert_frame_equal(df, expected)
|
| 677 |
+
|
| 678 |
+
@td.skip_array_manager_invalid_test
|
| 679 |
+
@pytest.mark.parametrize("val", [-1, {"x": -1, "y": -1}])
|
| 680 |
+
def test_inplace_dict_update_view(self, val, using_copy_on_write):
|
| 681 |
+
# GH#47188
|
| 682 |
+
df = DataFrame({"x": [np.nan, 2], "y": [np.nan, 2]})
|
| 683 |
+
df_orig = df.copy()
|
| 684 |
+
result_view = df[:]
|
| 685 |
+
df.fillna(val, inplace=True)
|
| 686 |
+
expected = DataFrame({"x": [-1, 2.0], "y": [-1.0, 2]})
|
| 687 |
+
tm.assert_frame_equal(df, expected)
|
| 688 |
+
if using_copy_on_write:
|
| 689 |
+
tm.assert_frame_equal(result_view, df_orig)
|
| 690 |
+
else:
|
| 691 |
+
tm.assert_frame_equal(result_view, expected)
|
| 692 |
+
|
| 693 |
+
def test_single_block_df_with_horizontal_axis(self):
|
| 694 |
+
# GH 47713
|
| 695 |
+
df = DataFrame(
|
| 696 |
+
{
|
| 697 |
+
"col1": [5, 0, np.nan, 10, np.nan],
|
| 698 |
+
"col2": [7, np.nan, np.nan, 5, 3],
|
| 699 |
+
"col3": [12, np.nan, 1, 2, 0],
|
| 700 |
+
"col4": [np.nan, 1, 1, np.nan, 18],
|
| 701 |
+
}
|
| 702 |
+
)
|
| 703 |
+
result = df.fillna(50, limit=1, axis=1)
|
| 704 |
+
expected = DataFrame(
|
| 705 |
+
[
|
| 706 |
+
[5.0, 7.0, 12.0, 50.0],
|
| 707 |
+
[0.0, 50.0, np.nan, 1.0],
|
| 708 |
+
[50.0, np.nan, 1.0, 1.0],
|
| 709 |
+
[10.0, 5.0, 2.0, 50.0],
|
| 710 |
+
[50.0, 3.0, 0.0, 18.0],
|
| 711 |
+
],
|
| 712 |
+
columns=["col1", "col2", "col3", "col4"],
|
| 713 |
+
)
|
| 714 |
+
tm.assert_frame_equal(result, expected)
|
| 715 |
+
|
| 716 |
+
def test_fillna_with_multi_index_frame(self):
|
| 717 |
+
# GH 47649
|
| 718 |
+
pdf = DataFrame(
|
| 719 |
+
{
|
| 720 |
+
("x", "a"): [np.nan, 2.0, 3.0],
|
| 721 |
+
("x", "b"): [1.0, 2.0, np.nan],
|
| 722 |
+
("y", "c"): [1.0, 2.0, np.nan],
|
| 723 |
+
}
|
| 724 |
+
)
|
| 725 |
+
expected = DataFrame(
|
| 726 |
+
{
|
| 727 |
+
("x", "a"): [-1.0, 2.0, 3.0],
|
| 728 |
+
("x", "b"): [1.0, 2.0, -1.0],
|
| 729 |
+
("y", "c"): [1.0, 2.0, np.nan],
|
| 730 |
+
}
|
| 731 |
+
)
|
| 732 |
+
tm.assert_frame_equal(pdf.fillna({"x": -1}), expected)
|
| 733 |
+
tm.assert_frame_equal(pdf.fillna({"x": -1, ("x", "b"): -2}), expected)
|
| 734 |
+
|
| 735 |
+
expected = DataFrame(
|
| 736 |
+
{
|
| 737 |
+
("x", "a"): [-1.0, 2.0, 3.0],
|
| 738 |
+
("x", "b"): [1.0, 2.0, -2.0],
|
| 739 |
+
("y", "c"): [1.0, 2.0, np.nan],
|
| 740 |
+
}
|
| 741 |
+
)
|
| 742 |
+
tm.assert_frame_equal(pdf.fillna({("x", "b"): -2, "x": -1}), expected)
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def test_fillna_nonconsolidated_frame():
|
| 746 |
+
# https://github.com/pandas-dev/pandas/issues/36495
|
| 747 |
+
df = DataFrame(
|
| 748 |
+
[
|
| 749 |
+
[1, 1, 1, 1.0],
|
| 750 |
+
[2, 2, 2, 2.0],
|
| 751 |
+
[3, 3, 3, 3.0],
|
| 752 |
+
],
|
| 753 |
+
columns=["i1", "i2", "i3", "f1"],
|
| 754 |
+
)
|
| 755 |
+
df_nonconsol = df.pivot(index="i1", columns="i2")
|
| 756 |
+
result = df_nonconsol.fillna(0)
|
| 757 |
+
assert result.isna().sum().sum() == 0
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
def test_fillna_nones_inplace():
|
| 761 |
+
# GH 48480
|
| 762 |
+
df = DataFrame(
|
| 763 |
+
[[None, None], [None, None]],
|
| 764 |
+
columns=["A", "B"],
|
| 765 |
+
)
|
| 766 |
+
with tm.assert_produces_warning(False):
|
| 767 |
+
df.fillna(value={"A": 1, "B": 2}, inplace=True)
|
| 768 |
+
|
| 769 |
+
expected = DataFrame([[1, 2], [1, 2]], columns=["A", "B"])
|
| 770 |
+
tm.assert_frame_equal(df, expected)
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
@pytest.mark.parametrize("func", ["pad", "backfill"])
|
| 774 |
+
def test_pad_backfill_deprecated(func):
|
| 775 |
+
# GH#33396
|
| 776 |
+
df = DataFrame({"a": [1, 2, 3]})
|
| 777 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 778 |
+
getattr(df, func)()
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_filter.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import DataFrame
|
| 6 |
+
import pandas._testing as tm
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestDataFrameFilter:
|
| 10 |
+
def test_filter(self, float_frame, float_string_frame):
|
| 11 |
+
# Items
|
| 12 |
+
filtered = float_frame.filter(["A", "B", "E"])
|
| 13 |
+
assert len(filtered.columns) == 2
|
| 14 |
+
assert "E" not in filtered
|
| 15 |
+
|
| 16 |
+
filtered = float_frame.filter(["A", "B", "E"], axis="columns")
|
| 17 |
+
assert len(filtered.columns) == 2
|
| 18 |
+
assert "E" not in filtered
|
| 19 |
+
|
| 20 |
+
# Other axis
|
| 21 |
+
idx = float_frame.index[0:4]
|
| 22 |
+
filtered = float_frame.filter(idx, axis="index")
|
| 23 |
+
expected = float_frame.reindex(index=idx)
|
| 24 |
+
tm.assert_frame_equal(filtered, expected)
|
| 25 |
+
|
| 26 |
+
# like
|
| 27 |
+
fcopy = float_frame.copy()
|
| 28 |
+
fcopy["AA"] = 1
|
| 29 |
+
|
| 30 |
+
filtered = fcopy.filter(like="A")
|
| 31 |
+
assert len(filtered.columns) == 2
|
| 32 |
+
assert "AA" in filtered
|
| 33 |
+
|
| 34 |
+
# like with ints in column names
|
| 35 |
+
df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, "_A", "_B"])
|
| 36 |
+
filtered = df.filter(like="_")
|
| 37 |
+
assert len(filtered.columns) == 2
|
| 38 |
+
|
| 39 |
+
# regex with ints in column names
|
| 40 |
+
# from PR #10384
|
| 41 |
+
df = DataFrame(0.0, index=[0, 1, 2], columns=["A1", 1, "B", 2, "C"])
|
| 42 |
+
expected = DataFrame(
|
| 43 |
+
0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)
|
| 44 |
+
)
|
| 45 |
+
filtered = df.filter(regex="^[0-9]+$")
|
| 46 |
+
tm.assert_frame_equal(filtered, expected)
|
| 47 |
+
|
| 48 |
+
expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, "0", 1, "1"])
|
| 49 |
+
# shouldn't remove anything
|
| 50 |
+
filtered = expected.filter(regex="^[0-9]+$")
|
| 51 |
+
tm.assert_frame_equal(filtered, expected)
|
| 52 |
+
|
| 53 |
+
# pass in None
|
| 54 |
+
with pytest.raises(TypeError, match="Must pass"):
|
| 55 |
+
float_frame.filter()
|
| 56 |
+
with pytest.raises(TypeError, match="Must pass"):
|
| 57 |
+
float_frame.filter(items=None)
|
| 58 |
+
with pytest.raises(TypeError, match="Must pass"):
|
| 59 |
+
float_frame.filter(axis=1)
|
| 60 |
+
|
| 61 |
+
# test mutually exclusive arguments
|
| 62 |
+
with pytest.raises(TypeError, match="mutually exclusive"):
|
| 63 |
+
float_frame.filter(items=["one", "three"], regex="e$", like="bbi")
|
| 64 |
+
with pytest.raises(TypeError, match="mutually exclusive"):
|
| 65 |
+
float_frame.filter(items=["one", "three"], regex="e$", axis=1)
|
| 66 |
+
with pytest.raises(TypeError, match="mutually exclusive"):
|
| 67 |
+
float_frame.filter(items=["one", "three"], regex="e$")
|
| 68 |
+
with pytest.raises(TypeError, match="mutually exclusive"):
|
| 69 |
+
float_frame.filter(items=["one", "three"], like="bbi", axis=0)
|
| 70 |
+
with pytest.raises(TypeError, match="mutually exclusive"):
|
| 71 |
+
float_frame.filter(items=["one", "three"], like="bbi")
|
| 72 |
+
|
| 73 |
+
# objects
|
| 74 |
+
filtered = float_string_frame.filter(like="foo")
|
| 75 |
+
assert "foo" in filtered
|
| 76 |
+
|
| 77 |
+
# unicode columns, won't ascii-encode
|
| 78 |
+
df = float_frame.rename(columns={"B": "\u2202"})
|
| 79 |
+
filtered = df.filter(like="C")
|
| 80 |
+
assert "C" in filtered
|
| 81 |
+
|
| 82 |
+
def test_filter_regex_search(self, float_frame):
|
| 83 |
+
fcopy = float_frame.copy()
|
| 84 |
+
fcopy["AA"] = 1
|
| 85 |
+
|
| 86 |
+
# regex
|
| 87 |
+
filtered = fcopy.filter(regex="[A]+")
|
| 88 |
+
assert len(filtered.columns) == 2
|
| 89 |
+
assert "AA" in filtered
|
| 90 |
+
|
| 91 |
+
# doesn't have to be at beginning
|
| 92 |
+
df = DataFrame(
|
| 93 |
+
{"aBBa": [1, 2], "BBaBB": [1, 2], "aCCa": [1, 2], "aCCaBB": [1, 2]}
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
result = df.filter(regex="BB")
|
| 97 |
+
exp = df[[x for x in df.columns if "BB" in x]]
|
| 98 |
+
tm.assert_frame_equal(result, exp)
|
| 99 |
+
|
| 100 |
+
@pytest.mark.parametrize(
|
| 101 |
+
"name,expected",
|
| 102 |
+
[
|
| 103 |
+
("a", DataFrame({"a": [1, 2]})),
|
| 104 |
+
("a", DataFrame({"a": [1, 2]})),
|
| 105 |
+
("あ", DataFrame({"あ": [3, 4]})),
|
| 106 |
+
],
|
| 107 |
+
)
|
| 108 |
+
def test_filter_unicode(self, name, expected):
|
| 109 |
+
# GH13101
|
| 110 |
+
df = DataFrame({"a": [1, 2], "あ": [3, 4]})
|
| 111 |
+
|
| 112 |
+
tm.assert_frame_equal(df.filter(like=name), expected)
|
| 113 |
+
tm.assert_frame_equal(df.filter(regex=name), expected)
|
| 114 |
+
|
| 115 |
+
@pytest.mark.parametrize("name", ["a", "a"])
|
| 116 |
+
def test_filter_bytestring(self, name):
|
| 117 |
+
# GH13101
|
| 118 |
+
df = DataFrame({b"a": [1, 2], b"b": [3, 4]})
|
| 119 |
+
expected = DataFrame({b"a": [1, 2]})
|
| 120 |
+
|
| 121 |
+
tm.assert_frame_equal(df.filter(like=name), expected)
|
| 122 |
+
tm.assert_frame_equal(df.filter(regex=name), expected)
|
| 123 |
+
|
| 124 |
+
def test_filter_corner(self):
|
| 125 |
+
empty = DataFrame()
|
| 126 |
+
|
| 127 |
+
result = empty.filter([])
|
| 128 |
+
tm.assert_frame_equal(result, empty)
|
| 129 |
+
|
| 130 |
+
result = empty.filter(like="foo")
|
| 131 |
+
tm.assert_frame_equal(result, empty)
|
| 132 |
+
|
| 133 |
+
def test_filter_regex_non_string(self):
|
| 134 |
+
# GH#5798 trying to filter on non-string columns should drop,
|
| 135 |
+
# not raise
|
| 136 |
+
df = DataFrame(np.random.random((3, 2)), columns=["STRING", 123])
|
| 137 |
+
result = df.filter(regex="STRING")
|
| 138 |
+
expected = df[["STRING"]]
|
| 139 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_first_and_last.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Note: includes tests for `last`
|
| 3 |
+
"""
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from pandas import (
|
| 8 |
+
DataFrame,
|
| 9 |
+
bdate_range,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestFirst:
|
| 15 |
+
def test_first_subset(self, frame_or_series):
|
| 16 |
+
ts = tm.makeTimeDataFrame(freq="12h")
|
| 17 |
+
ts = tm.get_obj(ts, frame_or_series)
|
| 18 |
+
result = ts.first("10d")
|
| 19 |
+
assert len(result) == 20
|
| 20 |
+
|
| 21 |
+
ts = tm.makeTimeDataFrame(freq="D")
|
| 22 |
+
ts = tm.get_obj(ts, frame_or_series)
|
| 23 |
+
result = ts.first("10d")
|
| 24 |
+
assert len(result) == 10
|
| 25 |
+
|
| 26 |
+
result = ts.first("3M")
|
| 27 |
+
expected = ts[:"3/31/2000"]
|
| 28 |
+
tm.assert_equal(result, expected)
|
| 29 |
+
|
| 30 |
+
result = ts.first("21D")
|
| 31 |
+
expected = ts[:21]
|
| 32 |
+
tm.assert_equal(result, expected)
|
| 33 |
+
|
| 34 |
+
result = ts[:0].first("3M")
|
| 35 |
+
tm.assert_equal(result, ts[:0])
|
| 36 |
+
|
| 37 |
+
def test_first_last_raises(self, frame_or_series):
|
| 38 |
+
# GH#20725
|
| 39 |
+
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
|
| 40 |
+
obj = tm.get_obj(obj, frame_or_series)
|
| 41 |
+
|
| 42 |
+
msg = "'first' only supports a DatetimeIndex index"
|
| 43 |
+
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
|
| 44 |
+
obj.first("1D")
|
| 45 |
+
|
| 46 |
+
msg = "'last' only supports a DatetimeIndex index"
|
| 47 |
+
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
|
| 48 |
+
obj.last("1D")
|
| 49 |
+
|
| 50 |
+
def test_last_subset(self, frame_or_series):
|
| 51 |
+
ts = tm.makeTimeDataFrame(freq="12h")
|
| 52 |
+
ts = tm.get_obj(ts, frame_or_series)
|
| 53 |
+
result = ts.last("10d")
|
| 54 |
+
assert len(result) == 20
|
| 55 |
+
|
| 56 |
+
ts = tm.makeTimeDataFrame(nper=30, freq="D")
|
| 57 |
+
ts = tm.get_obj(ts, frame_or_series)
|
| 58 |
+
result = ts.last("10d")
|
| 59 |
+
assert len(result) == 10
|
| 60 |
+
|
| 61 |
+
result = ts.last("21D")
|
| 62 |
+
expected = ts["2000-01-10":]
|
| 63 |
+
tm.assert_equal(result, expected)
|
| 64 |
+
|
| 65 |
+
result = ts.last("21D")
|
| 66 |
+
expected = ts[-21:]
|
| 67 |
+
tm.assert_equal(result, expected)
|
| 68 |
+
|
| 69 |
+
result = ts[:0].last("3M")
|
| 70 |
+
tm.assert_equal(result, ts[:0])
|
| 71 |
+
|
| 72 |
+
@pytest.mark.parametrize("start, periods", [("2010-03-31", 1), ("2010-03-30", 2)])
|
| 73 |
+
def test_first_with_first_day_last_of_month(self, frame_or_series, start, periods):
|
| 74 |
+
# GH#29623
|
| 75 |
+
x = frame_or_series([1] * 100, index=bdate_range(start, periods=100))
|
| 76 |
+
result = x.first("1M")
|
| 77 |
+
expected = frame_or_series(
|
| 78 |
+
[1] * periods, index=bdate_range(start, periods=periods)
|
| 79 |
+
)
|
| 80 |
+
tm.assert_equal(result, expected)
|
| 81 |
+
|
| 82 |
+
def test_first_with_first_day_end_of_frq_n_greater_one(self, frame_or_series):
|
| 83 |
+
# GH#29623
|
| 84 |
+
x = frame_or_series([1] * 100, index=bdate_range("2010-03-31", periods=100))
|
| 85 |
+
result = x.first("2M")
|
| 86 |
+
expected = frame_or_series(
|
| 87 |
+
[1] * 23, index=bdate_range("2010-03-31", "2010-04-30")
|
| 88 |
+
)
|
| 89 |
+
tm.assert_equal(result, expected)
|
| 90 |
+
|
| 91 |
+
@pytest.mark.parametrize("func", ["first", "last"])
|
| 92 |
+
def test_empty_not_input(self, func):
|
| 93 |
+
# GH#51032
|
| 94 |
+
df = DataFrame(index=pd.DatetimeIndex([]))
|
| 95 |
+
result = getattr(df, func)(offset=1)
|
| 96 |
+
tm.assert_frame_equal(df, result)
|
| 97 |
+
assert df is not result
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_first_valid_index.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Includes test for last_valid_index.
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from pandas import (
|
| 8 |
+
DataFrame,
|
| 9 |
+
Series,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestFirstValidIndex:
|
| 15 |
+
def test_first_valid_index_single_nan(self, frame_or_series):
|
| 16 |
+
# GH#9752 Series/DataFrame should both return None, not raise
|
| 17 |
+
obj = frame_or_series([np.nan])
|
| 18 |
+
|
| 19 |
+
assert obj.first_valid_index() is None
|
| 20 |
+
assert obj.iloc[:0].first_valid_index() is None
|
| 21 |
+
|
| 22 |
+
@pytest.mark.parametrize(
|
| 23 |
+
"empty", [DataFrame(), Series(dtype=object), Series([], index=[], dtype=object)]
|
| 24 |
+
)
|
| 25 |
+
def test_first_valid_index_empty(self, empty):
|
| 26 |
+
# GH#12800
|
| 27 |
+
assert empty.last_valid_index() is None
|
| 28 |
+
assert empty.first_valid_index() is None
|
| 29 |
+
|
| 30 |
+
@pytest.mark.parametrize(
|
| 31 |
+
"data,idx,expected_first,expected_last",
|
| 32 |
+
[
|
| 33 |
+
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
|
| 34 |
+
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
|
| 35 |
+
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
|
| 36 |
+
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
|
| 37 |
+
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
|
| 38 |
+
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
|
| 39 |
+
],
|
| 40 |
+
)
|
| 41 |
+
def test_first_last_valid_frame(self, data, idx, expected_first, expected_last):
|
| 42 |
+
# GH#21441
|
| 43 |
+
df = DataFrame(data, index=idx)
|
| 44 |
+
assert expected_first == df.first_valid_index()
|
| 45 |
+
assert expected_last == df.last_valid_index()
|
| 46 |
+
|
| 47 |
+
@pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
|
| 48 |
+
def test_first_last_valid(self, index_func):
|
| 49 |
+
N = 30
|
| 50 |
+
index = index_func(N)
|
| 51 |
+
mat = np.random.randn(N)
|
| 52 |
+
mat[:5] = np.nan
|
| 53 |
+
mat[-5:] = np.nan
|
| 54 |
+
|
| 55 |
+
frame = DataFrame({"foo": mat}, index=index)
|
| 56 |
+
assert frame.first_valid_index() == frame.index[5]
|
| 57 |
+
assert frame.last_valid_index() == frame.index[-6]
|
| 58 |
+
|
| 59 |
+
ser = frame["foo"]
|
| 60 |
+
assert ser.first_valid_index() == frame.index[5]
|
| 61 |
+
assert ser.last_valid_index() == frame.index[-6]
|
| 62 |
+
|
| 63 |
+
@pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
|
| 64 |
+
def test_first_last_valid_all_nan(self, index_func):
|
| 65 |
+
# GH#17400: no valid entries
|
| 66 |
+
index = index_func(30)
|
| 67 |
+
frame = DataFrame(np.nan, columns=["foo"], index=index)
|
| 68 |
+
|
| 69 |
+
assert frame.last_valid_index() is None
|
| 70 |
+
assert frame.first_valid_index() is None
|
| 71 |
+
|
| 72 |
+
ser = frame["foo"]
|
| 73 |
+
assert ser.first_valid_index() is None
|
| 74 |
+
assert ser.last_valid_index() is None
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_get_numeric_data.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from pandas import (
|
| 5 |
+
Categorical,
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
Series,
|
| 9 |
+
Timestamp,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
from pandas.core.arrays import IntervalArray
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestGetNumericData:
|
| 16 |
+
def test_get_numeric_data_preserve_dtype(self):
|
| 17 |
+
# get the numeric data
|
| 18 |
+
obj = DataFrame({"A": [1, "2", 3.0]})
|
| 19 |
+
result = obj._get_numeric_data()
|
| 20 |
+
expected = DataFrame(dtype=object, index=pd.RangeIndex(3), columns=[])
|
| 21 |
+
tm.assert_frame_equal(result, expected)
|
| 22 |
+
|
| 23 |
+
def test_get_numeric_data(self):
|
| 24 |
+
datetime64name = np.dtype("M8[ns]").name
|
| 25 |
+
objectname = np.dtype(np.object_).name
|
| 26 |
+
|
| 27 |
+
df = DataFrame(
|
| 28 |
+
{"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
|
| 29 |
+
index=np.arange(10),
|
| 30 |
+
)
|
| 31 |
+
result = df.dtypes
|
| 32 |
+
expected = Series(
|
| 33 |
+
[
|
| 34 |
+
np.dtype("float64"),
|
| 35 |
+
np.dtype("int64"),
|
| 36 |
+
np.dtype(objectname),
|
| 37 |
+
np.dtype(datetime64name),
|
| 38 |
+
],
|
| 39 |
+
index=["a", "b", "c", "f"],
|
| 40 |
+
)
|
| 41 |
+
tm.assert_series_equal(result, expected)
|
| 42 |
+
|
| 43 |
+
df = DataFrame(
|
| 44 |
+
{
|
| 45 |
+
"a": 1.0,
|
| 46 |
+
"b": 2,
|
| 47 |
+
"c": "foo",
|
| 48 |
+
"d": np.array([1.0] * 10, dtype="float32"),
|
| 49 |
+
"e": np.array([1] * 10, dtype="int32"),
|
| 50 |
+
"f": np.array([1] * 10, dtype="int16"),
|
| 51 |
+
"g": Timestamp("20010102"),
|
| 52 |
+
},
|
| 53 |
+
index=np.arange(10),
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
result = df._get_numeric_data()
|
| 57 |
+
expected = df.loc[:, ["a", "b", "d", "e", "f"]]
|
| 58 |
+
tm.assert_frame_equal(result, expected)
|
| 59 |
+
|
| 60 |
+
only_obj = df.loc[:, ["c", "g"]]
|
| 61 |
+
result = only_obj._get_numeric_data()
|
| 62 |
+
expected = df.loc[:, []]
|
| 63 |
+
tm.assert_frame_equal(result, expected)
|
| 64 |
+
|
| 65 |
+
df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
|
| 66 |
+
result = df._get_numeric_data()
|
| 67 |
+
expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
|
| 68 |
+
tm.assert_frame_equal(result, expected)
|
| 69 |
+
|
| 70 |
+
df = result.copy()
|
| 71 |
+
result = df._get_numeric_data()
|
| 72 |
+
expected = df
|
| 73 |
+
tm.assert_frame_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
def test_get_numeric_data_mixed_dtype(self):
|
| 76 |
+
# numeric and object columns
|
| 77 |
+
|
| 78 |
+
df = DataFrame(
|
| 79 |
+
{
|
| 80 |
+
"a": [1, 2, 3],
|
| 81 |
+
"b": [True, False, True],
|
| 82 |
+
"c": ["foo", "bar", "baz"],
|
| 83 |
+
"d": [None, None, None],
|
| 84 |
+
"e": [3.14, 0.577, 2.773],
|
| 85 |
+
}
|
| 86 |
+
)
|
| 87 |
+
result = df._get_numeric_data()
|
| 88 |
+
tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))
|
| 89 |
+
|
| 90 |
+
def test_get_numeric_data_extension_dtype(self):
|
| 91 |
+
# GH#22290
|
| 92 |
+
df = DataFrame(
|
| 93 |
+
{
|
| 94 |
+
"A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
|
| 95 |
+
"B": Categorical(list("abcabc")),
|
| 96 |
+
"C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
|
| 97 |
+
"D": IntervalArray.from_breaks(range(7)),
|
| 98 |
+
}
|
| 99 |
+
)
|
| 100 |
+
result = df._get_numeric_data()
|
| 101 |
+
expected = df.loc[:, ["A", "C"]]
|
| 102 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_head_tail.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from pandas import DataFrame
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_head_tail_generic(index, frame_or_series):
|
| 8 |
+
# GH#5370
|
| 9 |
+
|
| 10 |
+
ndim = 2 if frame_or_series is DataFrame else 1
|
| 11 |
+
shape = (len(index),) * ndim
|
| 12 |
+
vals = np.random.randn(*shape)
|
| 13 |
+
obj = frame_or_series(vals, index=index)
|
| 14 |
+
|
| 15 |
+
tm.assert_equal(obj.head(), obj.iloc[:5])
|
| 16 |
+
tm.assert_equal(obj.tail(), obj.iloc[-5:])
|
| 17 |
+
|
| 18 |
+
# 0-len
|
| 19 |
+
tm.assert_equal(obj.head(0), obj.iloc[0:0])
|
| 20 |
+
tm.assert_equal(obj.tail(0), obj.iloc[0:0])
|
| 21 |
+
|
| 22 |
+
# bounded
|
| 23 |
+
tm.assert_equal(obj.head(len(obj) + 1), obj)
|
| 24 |
+
tm.assert_equal(obj.tail(len(obj) + 1), obj)
|
| 25 |
+
|
| 26 |
+
# neg index
|
| 27 |
+
tm.assert_equal(obj.head(-3), obj.head(len(index) - 3))
|
| 28 |
+
tm.assert_equal(obj.tail(-3), obj.tail(len(index) - 3))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def test_head_tail(float_frame):
|
| 32 |
+
tm.assert_frame_equal(float_frame.head(), float_frame[:5])
|
| 33 |
+
tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
|
| 34 |
+
|
| 35 |
+
tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
|
| 36 |
+
tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
|
| 37 |
+
|
| 38 |
+
tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
|
| 39 |
+
tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
|
| 40 |
+
tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
|
| 41 |
+
tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
|
| 42 |
+
# with a float index
|
| 43 |
+
df = float_frame.copy()
|
| 44 |
+
df.index = np.arange(len(float_frame)) + 0.1
|
| 45 |
+
tm.assert_frame_equal(df.head(), df.iloc[:5])
|
| 46 |
+
tm.assert_frame_equal(df.tail(), df.iloc[-5:])
|
| 47 |
+
tm.assert_frame_equal(df.head(0), df[0:0])
|
| 48 |
+
tm.assert_frame_equal(df.tail(0), df[0:0])
|
| 49 |
+
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
|
| 50 |
+
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def test_head_tail_empty():
|
| 54 |
+
# test empty dataframe
|
| 55 |
+
empty_df = DataFrame()
|
| 56 |
+
tm.assert_frame_equal(empty_df.tail(), empty_df)
|
| 57 |
+
tm.assert_frame_equal(empty_df.head(), empty_df)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_infer_objects.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
from pandas import DataFrame
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestInferObjects:
|
| 8 |
+
def test_infer_objects(self):
|
| 9 |
+
# GH#11221
|
| 10 |
+
df = DataFrame(
|
| 11 |
+
{
|
| 12 |
+
"a": ["a", 1, 2, 3],
|
| 13 |
+
"b": ["b", 2.0, 3.0, 4.1],
|
| 14 |
+
"c": [
|
| 15 |
+
"c",
|
| 16 |
+
datetime(2016, 1, 1),
|
| 17 |
+
datetime(2016, 1, 2),
|
| 18 |
+
datetime(2016, 1, 3),
|
| 19 |
+
],
|
| 20 |
+
"d": [1, 2, 3, "d"],
|
| 21 |
+
},
|
| 22 |
+
columns=["a", "b", "c", "d"],
|
| 23 |
+
)
|
| 24 |
+
df = df.iloc[1:].infer_objects()
|
| 25 |
+
|
| 26 |
+
assert df["a"].dtype == "int64"
|
| 27 |
+
assert df["b"].dtype == "float64"
|
| 28 |
+
assert df["c"].dtype == "M8[ns]"
|
| 29 |
+
assert df["d"].dtype == "object"
|
| 30 |
+
|
| 31 |
+
expected = DataFrame(
|
| 32 |
+
{
|
| 33 |
+
"a": [1, 2, 3],
|
| 34 |
+
"b": [2.0, 3.0, 4.1],
|
| 35 |
+
"c": [datetime(2016, 1, 1), datetime(2016, 1, 2), datetime(2016, 1, 3)],
|
| 36 |
+
"d": [2, 3, "d"],
|
| 37 |
+
},
|
| 38 |
+
columns=["a", "b", "c", "d"],
|
| 39 |
+
)
|
| 40 |
+
# reconstruct frame to verify inference is same
|
| 41 |
+
result = df.reset_index(drop=True)
|
| 42 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_interpolate.py
ADDED
|
@@ -0,0 +1,422 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
NaT,
|
| 9 |
+
Series,
|
| 10 |
+
date_range,
|
| 11 |
+
)
|
| 12 |
+
import pandas._testing as tm
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestDataFrameInterpolate:
|
| 16 |
+
def test_interpolate_datetimelike_values(self, frame_or_series):
|
| 17 |
+
# GH#11312, GH#51005
|
| 18 |
+
orig = Series(date_range("2012-01-01", periods=5))
|
| 19 |
+
ser = orig.copy()
|
| 20 |
+
ser[2] = NaT
|
| 21 |
+
|
| 22 |
+
res = frame_or_series(ser).interpolate()
|
| 23 |
+
expected = frame_or_series(orig)
|
| 24 |
+
tm.assert_equal(res, expected)
|
| 25 |
+
|
| 26 |
+
# datetime64tz cast
|
| 27 |
+
ser_tz = ser.dt.tz_localize("US/Pacific")
|
| 28 |
+
res_tz = frame_or_series(ser_tz).interpolate()
|
| 29 |
+
expected_tz = frame_or_series(orig.dt.tz_localize("US/Pacific"))
|
| 30 |
+
tm.assert_equal(res_tz, expected_tz)
|
| 31 |
+
|
| 32 |
+
# timedelta64 cast
|
| 33 |
+
ser_td = ser - ser[0]
|
| 34 |
+
res_td = frame_or_series(ser_td).interpolate()
|
| 35 |
+
expected_td = frame_or_series(orig - orig[0])
|
| 36 |
+
tm.assert_equal(res_td, expected_td)
|
| 37 |
+
|
| 38 |
+
def test_interpolate_inplace(self, frame_or_series, using_array_manager, request):
|
| 39 |
+
# GH#44749
|
| 40 |
+
if using_array_manager and frame_or_series is DataFrame:
|
| 41 |
+
mark = pytest.mark.xfail(reason=".values-based in-place check is invalid")
|
| 42 |
+
request.node.add_marker(mark)
|
| 43 |
+
|
| 44 |
+
obj = frame_or_series([1, np.nan, 2])
|
| 45 |
+
orig = obj.values
|
| 46 |
+
|
| 47 |
+
obj.interpolate(inplace=True)
|
| 48 |
+
expected = frame_or_series([1, 1.5, 2])
|
| 49 |
+
tm.assert_equal(obj, expected)
|
| 50 |
+
|
| 51 |
+
# check we operated *actually* inplace
|
| 52 |
+
assert np.shares_memory(orig, obj.values)
|
| 53 |
+
assert orig.squeeze()[1] == 1.5
|
| 54 |
+
|
| 55 |
+
def test_interp_basic(self, using_copy_on_write):
|
| 56 |
+
df = DataFrame(
|
| 57 |
+
{
|
| 58 |
+
"A": [1, 2, np.nan, 4],
|
| 59 |
+
"B": [1, 4, 9, np.nan],
|
| 60 |
+
"C": [1, 2, 3, 5],
|
| 61 |
+
"D": list("abcd"),
|
| 62 |
+
}
|
| 63 |
+
)
|
| 64 |
+
expected = DataFrame(
|
| 65 |
+
{
|
| 66 |
+
"A": [1.0, 2.0, 3.0, 4.0],
|
| 67 |
+
"B": [1.0, 4.0, 9.0, 9.0],
|
| 68 |
+
"C": [1, 2, 3, 5],
|
| 69 |
+
"D": list("abcd"),
|
| 70 |
+
}
|
| 71 |
+
)
|
| 72 |
+
result = df.interpolate()
|
| 73 |
+
tm.assert_frame_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
# check we didn't operate inplace GH#45791
|
| 76 |
+
cvalues = df["C"]._values
|
| 77 |
+
dvalues = df["D"].values
|
| 78 |
+
if using_copy_on_write:
|
| 79 |
+
assert np.shares_memory(cvalues, result["C"]._values)
|
| 80 |
+
assert np.shares_memory(dvalues, result["D"]._values)
|
| 81 |
+
else:
|
| 82 |
+
assert not np.shares_memory(cvalues, result["C"]._values)
|
| 83 |
+
assert not np.shares_memory(dvalues, result["D"]._values)
|
| 84 |
+
|
| 85 |
+
res = df.interpolate(inplace=True)
|
| 86 |
+
assert res is None
|
| 87 |
+
tm.assert_frame_equal(df, expected)
|
| 88 |
+
|
| 89 |
+
# check we DID operate inplace
|
| 90 |
+
assert np.shares_memory(df["C"]._values, cvalues)
|
| 91 |
+
assert np.shares_memory(df["D"]._values, dvalues)
|
| 92 |
+
|
| 93 |
+
def test_interp_basic_with_non_range_index(self):
|
| 94 |
+
df = DataFrame(
|
| 95 |
+
{
|
| 96 |
+
"A": [1, 2, np.nan, 4],
|
| 97 |
+
"B": [1, 4, 9, np.nan],
|
| 98 |
+
"C": [1, 2, 3, 5],
|
| 99 |
+
"D": list("abcd"),
|
| 100 |
+
}
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
result = df.set_index("C").interpolate()
|
| 104 |
+
expected = df.set_index("C")
|
| 105 |
+
expected.loc[3, "A"] = 3
|
| 106 |
+
expected.loc[5, "B"] = 9
|
| 107 |
+
tm.assert_frame_equal(result, expected)
|
| 108 |
+
|
| 109 |
+
def test_interp_empty(self):
|
| 110 |
+
# https://github.com/pandas-dev/pandas/issues/35598
|
| 111 |
+
df = DataFrame()
|
| 112 |
+
result = df.interpolate()
|
| 113 |
+
assert result is not df
|
| 114 |
+
expected = df
|
| 115 |
+
tm.assert_frame_equal(result, expected)
|
| 116 |
+
|
| 117 |
+
def test_interp_bad_method(self):
|
| 118 |
+
df = DataFrame(
|
| 119 |
+
{
|
| 120 |
+
"A": [1, 2, np.nan, 4],
|
| 121 |
+
"B": [1, 4, 9, np.nan],
|
| 122 |
+
"C": [1, 2, 3, 5],
|
| 123 |
+
"D": list("abcd"),
|
| 124 |
+
}
|
| 125 |
+
)
|
| 126 |
+
msg = (
|
| 127 |
+
r"method must be one of \['linear', 'time', 'index', 'values', "
|
| 128 |
+
r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', "
|
| 129 |
+
r"'barycentric', 'krogh', 'spline', 'polynomial', "
|
| 130 |
+
r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', "
|
| 131 |
+
r"'cubicspline'\]. Got 'not_a_method' instead."
|
| 132 |
+
)
|
| 133 |
+
with pytest.raises(ValueError, match=msg):
|
| 134 |
+
df.interpolate(method="not_a_method")
|
| 135 |
+
|
| 136 |
+
def test_interp_combo(self):
|
| 137 |
+
df = DataFrame(
|
| 138 |
+
{
|
| 139 |
+
"A": [1.0, 2.0, np.nan, 4.0],
|
| 140 |
+
"B": [1, 4, 9, np.nan],
|
| 141 |
+
"C": [1, 2, 3, 5],
|
| 142 |
+
"D": list("abcd"),
|
| 143 |
+
}
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
result = df["A"].interpolate()
|
| 147 |
+
expected = Series([1.0, 2.0, 3.0, 4.0], name="A")
|
| 148 |
+
tm.assert_series_equal(result, expected)
|
| 149 |
+
|
| 150 |
+
result = df["A"].interpolate(downcast="infer")
|
| 151 |
+
expected = Series([1, 2, 3, 4], name="A")
|
| 152 |
+
tm.assert_series_equal(result, expected)
|
| 153 |
+
|
| 154 |
+
def test_interp_nan_idx(self):
|
| 155 |
+
df = DataFrame({"A": [1, 2, np.nan, 4], "B": [np.nan, 2, 3, 4]})
|
| 156 |
+
df = df.set_index("A")
|
| 157 |
+
msg = (
|
| 158 |
+
"Interpolation with NaNs in the index has not been implemented. "
|
| 159 |
+
"Try filling those NaNs before interpolating."
|
| 160 |
+
)
|
| 161 |
+
with pytest.raises(NotImplementedError, match=msg):
|
| 162 |
+
df.interpolate(method="values")
|
| 163 |
+
|
| 164 |
+
@td.skip_if_no_scipy
|
| 165 |
+
def test_interp_various(self):
|
| 166 |
+
df = DataFrame(
|
| 167 |
+
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
|
| 168 |
+
)
|
| 169 |
+
df = df.set_index("C")
|
| 170 |
+
expected = df.copy()
|
| 171 |
+
result = df.interpolate(method="polynomial", order=1)
|
| 172 |
+
|
| 173 |
+
expected.loc[3, "A"] = 2.66666667
|
| 174 |
+
expected.loc[13, "A"] = 5.76923076
|
| 175 |
+
tm.assert_frame_equal(result, expected)
|
| 176 |
+
|
| 177 |
+
result = df.interpolate(method="cubic")
|
| 178 |
+
# GH #15662.
|
| 179 |
+
expected.loc[3, "A"] = 2.81547781
|
| 180 |
+
expected.loc[13, "A"] = 5.52964175
|
| 181 |
+
tm.assert_frame_equal(result, expected)
|
| 182 |
+
|
| 183 |
+
result = df.interpolate(method="nearest")
|
| 184 |
+
expected.loc[3, "A"] = 2
|
| 185 |
+
expected.loc[13, "A"] = 5
|
| 186 |
+
tm.assert_frame_equal(result, expected, check_dtype=False)
|
| 187 |
+
|
| 188 |
+
result = df.interpolate(method="quadratic")
|
| 189 |
+
expected.loc[3, "A"] = 2.82150771
|
| 190 |
+
expected.loc[13, "A"] = 6.12648668
|
| 191 |
+
tm.assert_frame_equal(result, expected)
|
| 192 |
+
|
| 193 |
+
result = df.interpolate(method="slinear")
|
| 194 |
+
expected.loc[3, "A"] = 2.66666667
|
| 195 |
+
expected.loc[13, "A"] = 5.76923077
|
| 196 |
+
tm.assert_frame_equal(result, expected)
|
| 197 |
+
|
| 198 |
+
result = df.interpolate(method="zero")
|
| 199 |
+
expected.loc[3, "A"] = 2.0
|
| 200 |
+
expected.loc[13, "A"] = 5
|
| 201 |
+
tm.assert_frame_equal(result, expected, check_dtype=False)
|
| 202 |
+
|
| 203 |
+
@td.skip_if_no_scipy
|
| 204 |
+
def test_interp_alt_scipy(self):
|
| 205 |
+
df = DataFrame(
|
| 206 |
+
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
|
| 207 |
+
)
|
| 208 |
+
result = df.interpolate(method="barycentric")
|
| 209 |
+
expected = df.copy()
|
| 210 |
+
expected.loc[2, "A"] = 3
|
| 211 |
+
expected.loc[5, "A"] = 6
|
| 212 |
+
tm.assert_frame_equal(result, expected)
|
| 213 |
+
|
| 214 |
+
result = df.interpolate(method="barycentric", downcast="infer")
|
| 215 |
+
tm.assert_frame_equal(result, expected.astype(np.int64))
|
| 216 |
+
|
| 217 |
+
result = df.interpolate(method="krogh")
|
| 218 |
+
expectedk = df.copy()
|
| 219 |
+
expectedk["A"] = expected["A"]
|
| 220 |
+
tm.assert_frame_equal(result, expectedk)
|
| 221 |
+
|
| 222 |
+
result = df.interpolate(method="pchip")
|
| 223 |
+
expected.loc[2, "A"] = 3
|
| 224 |
+
expected.loc[5, "A"] = 6.0
|
| 225 |
+
|
| 226 |
+
tm.assert_frame_equal(result, expected)
|
| 227 |
+
|
| 228 |
+
def test_interp_rowwise(self):
|
| 229 |
+
df = DataFrame(
|
| 230 |
+
{
|
| 231 |
+
0: [1, 2, np.nan, 4],
|
| 232 |
+
1: [2, 3, 4, np.nan],
|
| 233 |
+
2: [np.nan, 4, 5, 6],
|
| 234 |
+
3: [4, np.nan, 6, 7],
|
| 235 |
+
4: [1, 2, 3, 4],
|
| 236 |
+
}
|
| 237 |
+
)
|
| 238 |
+
result = df.interpolate(axis=1)
|
| 239 |
+
expected = df.copy()
|
| 240 |
+
expected.loc[3, 1] = 5
|
| 241 |
+
expected.loc[0, 2] = 3
|
| 242 |
+
expected.loc[1, 3] = 3
|
| 243 |
+
expected[4] = expected[4].astype(np.float64)
|
| 244 |
+
tm.assert_frame_equal(result, expected)
|
| 245 |
+
|
| 246 |
+
result = df.interpolate(axis=1, method="values")
|
| 247 |
+
tm.assert_frame_equal(result, expected)
|
| 248 |
+
|
| 249 |
+
result = df.interpolate(axis=0)
|
| 250 |
+
expected = df.interpolate()
|
| 251 |
+
tm.assert_frame_equal(result, expected)
|
| 252 |
+
|
| 253 |
+
@pytest.mark.parametrize(
|
| 254 |
+
"axis_name, axis_number",
|
| 255 |
+
[
|
| 256 |
+
pytest.param("rows", 0, id="rows_0"),
|
| 257 |
+
pytest.param("index", 0, id="index_0"),
|
| 258 |
+
pytest.param("columns", 1, id="columns_1"),
|
| 259 |
+
],
|
| 260 |
+
)
|
| 261 |
+
def test_interp_axis_names(self, axis_name, axis_number):
|
| 262 |
+
# GH 29132: test axis names
|
| 263 |
+
data = {0: [0, np.nan, 6], 1: [1, np.nan, 7], 2: [2, 5, 8]}
|
| 264 |
+
|
| 265 |
+
df = DataFrame(data, dtype=np.float64)
|
| 266 |
+
result = df.interpolate(axis=axis_name, method="linear")
|
| 267 |
+
expected = df.interpolate(axis=axis_number, method="linear")
|
| 268 |
+
tm.assert_frame_equal(result, expected)
|
| 269 |
+
|
| 270 |
+
def test_rowwise_alt(self):
|
| 271 |
+
df = DataFrame(
|
| 272 |
+
{
|
| 273 |
+
0: [0, 0.5, 1.0, np.nan, 4, 8, np.nan, np.nan, 64],
|
| 274 |
+
1: [1, 2, 3, 4, 3, 2, 1, 0, -1],
|
| 275 |
+
}
|
| 276 |
+
)
|
| 277 |
+
df.interpolate(axis=0)
|
| 278 |
+
# TODO: assert something?
|
| 279 |
+
|
| 280 |
+
@pytest.mark.parametrize(
|
| 281 |
+
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
|
| 282 |
+
)
|
| 283 |
+
def test_interp_leading_nans(self, check_scipy):
|
| 284 |
+
df = DataFrame(
|
| 285 |
+
{"A": [np.nan, np.nan, 0.5, 0.25, 0], "B": [np.nan, -3, -3.5, np.nan, -4]}
|
| 286 |
+
)
|
| 287 |
+
result = df.interpolate()
|
| 288 |
+
expected = df.copy()
|
| 289 |
+
expected.loc[3, "B"] = -3.75
|
| 290 |
+
tm.assert_frame_equal(result, expected)
|
| 291 |
+
|
| 292 |
+
if check_scipy:
|
| 293 |
+
result = df.interpolate(method="polynomial", order=1)
|
| 294 |
+
tm.assert_frame_equal(result, expected)
|
| 295 |
+
|
| 296 |
+
def test_interp_raise_on_only_mixed(self, axis):
|
| 297 |
+
df = DataFrame(
|
| 298 |
+
{
|
| 299 |
+
"A": [1, 2, np.nan, 4],
|
| 300 |
+
"B": ["a", "b", "c", "d"],
|
| 301 |
+
"C": [np.nan, 2, 5, 7],
|
| 302 |
+
"D": [np.nan, np.nan, 9, 9],
|
| 303 |
+
"E": [1, 2, 3, 4],
|
| 304 |
+
}
|
| 305 |
+
)
|
| 306 |
+
msg = (
|
| 307 |
+
"Cannot interpolate with all object-dtype columns "
|
| 308 |
+
"in the DataFrame. Try setting at least one "
|
| 309 |
+
"column to a numeric dtype."
|
| 310 |
+
)
|
| 311 |
+
with pytest.raises(TypeError, match=msg):
|
| 312 |
+
df.astype("object").interpolate(axis=axis)
|
| 313 |
+
|
| 314 |
+
def test_interp_raise_on_all_object_dtype(self):
|
| 315 |
+
# GH 22985
|
| 316 |
+
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, dtype="object")
|
| 317 |
+
msg = (
|
| 318 |
+
"Cannot interpolate with all object-dtype columns "
|
| 319 |
+
"in the DataFrame. Try setting at least one "
|
| 320 |
+
"column to a numeric dtype."
|
| 321 |
+
)
|
| 322 |
+
with pytest.raises(TypeError, match=msg):
|
| 323 |
+
df.interpolate()
|
| 324 |
+
|
| 325 |
+
def test_interp_inplace(self, using_copy_on_write):
|
| 326 |
+
df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]})
|
| 327 |
+
expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
|
| 328 |
+
expected_cow = df.copy()
|
| 329 |
+
result = df.copy()
|
| 330 |
+
return_value = result["a"].interpolate(inplace=True)
|
| 331 |
+
assert return_value is None
|
| 332 |
+
if using_copy_on_write:
|
| 333 |
+
tm.assert_frame_equal(result, expected_cow)
|
| 334 |
+
else:
|
| 335 |
+
tm.assert_frame_equal(result, expected)
|
| 336 |
+
|
| 337 |
+
result = df.copy()
|
| 338 |
+
return_value = result["a"].interpolate(inplace=True, downcast="infer")
|
| 339 |
+
assert return_value is None
|
| 340 |
+
if using_copy_on_write:
|
| 341 |
+
tm.assert_frame_equal(result, expected_cow)
|
| 342 |
+
else:
|
| 343 |
+
tm.assert_frame_equal(result, expected.astype("int64"))
|
| 344 |
+
|
| 345 |
+
def test_interp_inplace_row(self):
|
| 346 |
+
# GH 10395
|
| 347 |
+
result = DataFrame(
|
| 348 |
+
{"a": [1.0, 2.0, 3.0, 4.0], "b": [np.nan, 2.0, 3.0, 4.0], "c": [3, 2, 2, 2]}
|
| 349 |
+
)
|
| 350 |
+
expected = result.interpolate(method="linear", axis=1, inplace=False)
|
| 351 |
+
return_value = result.interpolate(method="linear", axis=1, inplace=True)
|
| 352 |
+
assert return_value is None
|
| 353 |
+
tm.assert_frame_equal(result, expected)
|
| 354 |
+
|
| 355 |
+
def test_interp_ignore_all_good(self):
|
| 356 |
+
# GH
|
| 357 |
+
df = DataFrame(
|
| 358 |
+
{
|
| 359 |
+
"A": [1, 2, np.nan, 4],
|
| 360 |
+
"B": [1, 2, 3, 4],
|
| 361 |
+
"C": [1.0, 2.0, np.nan, 4.0],
|
| 362 |
+
"D": [1.0, 2.0, 3.0, 4.0],
|
| 363 |
+
}
|
| 364 |
+
)
|
| 365 |
+
expected = DataFrame(
|
| 366 |
+
{
|
| 367 |
+
"A": np.array([1, 2, 3, 4], dtype="float64"),
|
| 368 |
+
"B": np.array([1, 2, 3, 4], dtype="int64"),
|
| 369 |
+
"C": np.array([1.0, 2.0, 3, 4.0], dtype="float64"),
|
| 370 |
+
"D": np.array([1.0, 2.0, 3.0, 4.0], dtype="float64"),
|
| 371 |
+
}
|
| 372 |
+
)
|
| 373 |
+
|
| 374 |
+
result = df.interpolate(downcast=None)
|
| 375 |
+
tm.assert_frame_equal(result, expected)
|
| 376 |
+
|
| 377 |
+
# all good
|
| 378 |
+
result = df[["B", "D"]].interpolate(downcast=None)
|
| 379 |
+
tm.assert_frame_equal(result, df[["B", "D"]])
|
| 380 |
+
|
| 381 |
+
def test_interp_time_inplace_axis(self):
|
| 382 |
+
# GH 9687
|
| 383 |
+
periods = 5
|
| 384 |
+
idx = date_range(start="2014-01-01", periods=periods)
|
| 385 |
+
data = np.random.rand(periods, periods)
|
| 386 |
+
data[data < 0.5] = np.nan
|
| 387 |
+
expected = DataFrame(index=idx, columns=idx, data=data)
|
| 388 |
+
|
| 389 |
+
result = expected.interpolate(axis=0, method="time")
|
| 390 |
+
return_value = expected.interpolate(axis=0, method="time", inplace=True)
|
| 391 |
+
assert return_value is None
|
| 392 |
+
tm.assert_frame_equal(result, expected)
|
| 393 |
+
|
| 394 |
+
@pytest.mark.parametrize("axis_name, axis_number", [("index", 0), ("columns", 1)])
|
| 395 |
+
def test_interp_string_axis(self, axis_name, axis_number):
|
| 396 |
+
# https://github.com/pandas-dev/pandas/issues/25190
|
| 397 |
+
x = np.linspace(0, 100, 1000)
|
| 398 |
+
y = np.sin(x)
|
| 399 |
+
df = DataFrame(
|
| 400 |
+
data=np.tile(y, (10, 1)), index=np.arange(10), columns=x
|
| 401 |
+
).reindex(columns=x * 1.005)
|
| 402 |
+
result = df.interpolate(method="linear", axis=axis_name)
|
| 403 |
+
expected = df.interpolate(method="linear", axis=axis_number)
|
| 404 |
+
tm.assert_frame_equal(result, expected)
|
| 405 |
+
|
| 406 |
+
@pytest.mark.parametrize("method", ["ffill", "bfill", "pad"])
|
| 407 |
+
def test_interp_fillna_methods(self, request, axis, method, using_array_manager):
|
| 408 |
+
# GH 12918
|
| 409 |
+
if using_array_manager and axis in (1, "columns"):
|
| 410 |
+
# TODO(ArrayManager) support axis=1
|
| 411 |
+
td.mark_array_manager_not_yet_implemented(request)
|
| 412 |
+
|
| 413 |
+
df = DataFrame(
|
| 414 |
+
{
|
| 415 |
+
"A": [1.0, 2.0, 3.0, 4.0, np.nan, 5.0],
|
| 416 |
+
"B": [2.0, 4.0, 6.0, np.nan, 8.0, 10.0],
|
| 417 |
+
"C": [3.0, 6.0, 9.0, np.nan, np.nan, 30.0],
|
| 418 |
+
}
|
| 419 |
+
)
|
| 420 |
+
expected = df.fillna(axis=axis, method=method)
|
| 421 |
+
result = df.interpolate(method=method, axis=axis)
|
| 422 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_is_homogeneous_dtype.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
Categorical,
|
| 8 |
+
DataFrame,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
# _is_homogeneous_type always returns True for ArrayManager
|
| 12 |
+
pytestmark = td.skip_array_manager_invalid_test
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@pytest.mark.parametrize(
|
| 16 |
+
"data, expected",
|
| 17 |
+
[
|
| 18 |
+
# empty
|
| 19 |
+
(DataFrame(), True),
|
| 20 |
+
# multi-same
|
| 21 |
+
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
|
| 22 |
+
# multi-object
|
| 23 |
+
(
|
| 24 |
+
DataFrame(
|
| 25 |
+
{
|
| 26 |
+
"A": np.array([1, 2], dtype=object),
|
| 27 |
+
"B": np.array(["a", "b"], dtype=object),
|
| 28 |
+
}
|
| 29 |
+
),
|
| 30 |
+
True,
|
| 31 |
+
),
|
| 32 |
+
# multi-extension
|
| 33 |
+
(
|
| 34 |
+
DataFrame({"A": Categorical(["a", "b"]), "B": Categorical(["a", "b"])}),
|
| 35 |
+
True,
|
| 36 |
+
),
|
| 37 |
+
# differ types
|
| 38 |
+
(DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
|
| 39 |
+
# differ sizes
|
| 40 |
+
(
|
| 41 |
+
DataFrame(
|
| 42 |
+
{
|
| 43 |
+
"A": np.array([1, 2], dtype=np.int32),
|
| 44 |
+
"B": np.array([1, 2], dtype=np.int64),
|
| 45 |
+
}
|
| 46 |
+
),
|
| 47 |
+
False,
|
| 48 |
+
),
|
| 49 |
+
# multi-extension differ
|
| 50 |
+
(
|
| 51 |
+
DataFrame({"A": Categorical(["a", "b"]), "B": Categorical(["b", "c"])}),
|
| 52 |
+
False,
|
| 53 |
+
),
|
| 54 |
+
],
|
| 55 |
+
)
|
| 56 |
+
def test_is_homogeneous_type(data, expected):
|
| 57 |
+
assert data._is_homogeneous_type is expected
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_isetitem.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas import (
|
| 2 |
+
DataFrame,
|
| 3 |
+
Series,
|
| 4 |
+
)
|
| 5 |
+
import pandas._testing as tm
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TestDataFrameSetItem:
|
| 9 |
+
def test_isetitem_ea_df(self):
|
| 10 |
+
# GH#49922
|
| 11 |
+
df = DataFrame([[1, 2, 3], [4, 5, 6]])
|
| 12 |
+
rhs = DataFrame([[11, 12], [13, 14]], dtype="Int64")
|
| 13 |
+
|
| 14 |
+
df.isetitem([0, 1], rhs)
|
| 15 |
+
expected = DataFrame(
|
| 16 |
+
{
|
| 17 |
+
0: Series([11, 13], dtype="Int64"),
|
| 18 |
+
1: Series([12, 14], dtype="Int64"),
|
| 19 |
+
2: [3, 6],
|
| 20 |
+
}
|
| 21 |
+
)
|
| 22 |
+
tm.assert_frame_equal(df, expected)
|
| 23 |
+
|
| 24 |
+
def test_isetitem_ea_df_scalar_indexer(self):
|
| 25 |
+
# GH#49922
|
| 26 |
+
df = DataFrame([[1, 2, 3], [4, 5, 6]])
|
| 27 |
+
rhs = DataFrame([[11], [13]], dtype="Int64")
|
| 28 |
+
|
| 29 |
+
df.isetitem(2, rhs)
|
| 30 |
+
expected = DataFrame(
|
| 31 |
+
{
|
| 32 |
+
0: [1, 4],
|
| 33 |
+
1: [2, 5],
|
| 34 |
+
2: Series([11, 13], dtype="Int64"),
|
| 35 |
+
}
|
| 36 |
+
)
|
| 37 |
+
tm.assert_frame_equal(df, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_join.py
ADDED
|
@@ -0,0 +1,571 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas.errors import MergeError
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
Index,
|
| 12 |
+
MultiIndex,
|
| 13 |
+
date_range,
|
| 14 |
+
period_range,
|
| 15 |
+
)
|
| 16 |
+
import pandas._testing as tm
|
| 17 |
+
from pandas.core.reshape.concat import concat
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@pytest.fixture
|
| 21 |
+
def frame_with_period_index():
|
| 22 |
+
return DataFrame(
|
| 23 |
+
data=np.arange(20).reshape(4, 5),
|
| 24 |
+
columns=list("abcde"),
|
| 25 |
+
index=period_range(start="2000", freq="A", periods=4),
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@pytest.fixture
|
| 30 |
+
def left():
|
| 31 |
+
return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@pytest.fixture
|
| 35 |
+
def right():
|
| 36 |
+
return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@pytest.fixture
|
| 40 |
+
def left_no_dup():
|
| 41 |
+
return DataFrame(
|
| 42 |
+
{"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]},
|
| 43 |
+
index=range(4),
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@pytest.fixture
|
| 48 |
+
def right_no_dup():
|
| 49 |
+
return DataFrame(
|
| 50 |
+
{
|
| 51 |
+
"a": ["a", "b", "c", "d", "e"],
|
| 52 |
+
"c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"],
|
| 53 |
+
},
|
| 54 |
+
index=range(5),
|
| 55 |
+
).set_index("a")
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@pytest.fixture
|
| 59 |
+
def left_w_dups(left_no_dup):
|
| 60 |
+
return concat(
|
| 61 |
+
[left_no_dup, DataFrame({"a": ["a"], "b": ["cow"]}, index=[3])], sort=True
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@pytest.fixture
|
| 66 |
+
def right_w_dups(right_no_dup):
|
| 67 |
+
return concat(
|
| 68 |
+
[right_no_dup, DataFrame({"a": ["e"], "c": ["moo"]}, index=[3])]
|
| 69 |
+
).set_index("a")
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@pytest.mark.parametrize(
|
| 73 |
+
"how, sort, expected",
|
| 74 |
+
[
|
| 75 |
+
("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])),
|
| 76 |
+
("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])),
|
| 77 |
+
(
|
| 78 |
+
"left",
|
| 79 |
+
False,
|
| 80 |
+
DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]),
|
| 81 |
+
),
|
| 82 |
+
(
|
| 83 |
+
"left",
|
| 84 |
+
True,
|
| 85 |
+
DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]),
|
| 86 |
+
),
|
| 87 |
+
(
|
| 88 |
+
"right",
|
| 89 |
+
False,
|
| 90 |
+
DataFrame({"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]),
|
| 91 |
+
),
|
| 92 |
+
(
|
| 93 |
+
"right",
|
| 94 |
+
True,
|
| 95 |
+
DataFrame({"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]),
|
| 96 |
+
),
|
| 97 |
+
(
|
| 98 |
+
"outer",
|
| 99 |
+
False,
|
| 100 |
+
DataFrame(
|
| 101 |
+
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
|
| 102 |
+
index=[0, 1, 2, 3],
|
| 103 |
+
),
|
| 104 |
+
),
|
| 105 |
+
(
|
| 106 |
+
"outer",
|
| 107 |
+
True,
|
| 108 |
+
DataFrame(
|
| 109 |
+
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
|
| 110 |
+
index=[0, 1, 2, 3],
|
| 111 |
+
),
|
| 112 |
+
),
|
| 113 |
+
],
|
| 114 |
+
)
|
| 115 |
+
def test_join(left, right, how, sort, expected):
|
| 116 |
+
result = left.join(right, how=how, sort=sort, validate="1:1")
|
| 117 |
+
tm.assert_frame_equal(result, expected)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def test_suffix_on_list_join():
|
| 121 |
+
first = DataFrame({"key": [1, 2, 3, 4, 5]})
|
| 122 |
+
second = DataFrame({"key": [1, 8, 3, 2, 5], "v1": [1, 2, 3, 4, 5]})
|
| 123 |
+
third = DataFrame({"keys": [5, 2, 3, 4, 1], "v2": [1, 2, 3, 4, 5]})
|
| 124 |
+
|
| 125 |
+
# check proper errors are raised
|
| 126 |
+
msg = "Suffixes not supported when joining multiple DataFrames"
|
| 127 |
+
with pytest.raises(ValueError, match=msg):
|
| 128 |
+
first.join([second], lsuffix="y")
|
| 129 |
+
with pytest.raises(ValueError, match=msg):
|
| 130 |
+
first.join([second, third], rsuffix="x")
|
| 131 |
+
with pytest.raises(ValueError, match=msg):
|
| 132 |
+
first.join([second, third], lsuffix="y", rsuffix="x")
|
| 133 |
+
with pytest.raises(ValueError, match="Indexes have overlapping values"):
|
| 134 |
+
first.join([second, third])
|
| 135 |
+
|
| 136 |
+
# no errors should be raised
|
| 137 |
+
arr_joined = first.join([third])
|
| 138 |
+
norm_joined = first.join(third)
|
| 139 |
+
tm.assert_frame_equal(arr_joined, norm_joined)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def test_join_invalid_validate(left_no_dup, right_no_dup):
|
| 143 |
+
# GH 46622
|
| 144 |
+
# Check invalid arguments
|
| 145 |
+
msg = (
|
| 146 |
+
'"invalid" is not a valid argument. '
|
| 147 |
+
"Valid arguments are:\n"
|
| 148 |
+
'- "1:1"\n'
|
| 149 |
+
'- "1:m"\n'
|
| 150 |
+
'- "m:1"\n'
|
| 151 |
+
'- "m:m"\n'
|
| 152 |
+
'- "one_to_one"\n'
|
| 153 |
+
'- "one_to_many"\n'
|
| 154 |
+
'- "many_to_one"\n'
|
| 155 |
+
'- "many_to_many"'
|
| 156 |
+
)
|
| 157 |
+
with pytest.raises(ValueError, match=msg):
|
| 158 |
+
left_no_dup.merge(right_no_dup, on="a", validate="invalid")
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups):
|
| 162 |
+
# GH 46622
|
| 163 |
+
# Dups on right allowed by one_to_many constraint
|
| 164 |
+
left_no_dup.join(
|
| 165 |
+
right_w_dups,
|
| 166 |
+
on="a",
|
| 167 |
+
validate="one_to_many",
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
# Dups on right not allowed by one_to_one constraint
|
| 171 |
+
msg = "Merge keys are not unique in right dataset; not a one-to-one merge"
|
| 172 |
+
with pytest.raises(MergeError, match=msg):
|
| 173 |
+
left_no_dup.join(
|
| 174 |
+
right_w_dups,
|
| 175 |
+
on="a",
|
| 176 |
+
validate="one_to_one",
|
| 177 |
+
)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_join_on_single_col_dup_on_left(left_w_dups, right_no_dup):
|
| 181 |
+
# GH 46622
|
| 182 |
+
# Dups on left allowed by many_to_one constraint
|
| 183 |
+
left_w_dups.join(
|
| 184 |
+
right_no_dup,
|
| 185 |
+
on="a",
|
| 186 |
+
validate="many_to_one",
|
| 187 |
+
)
|
| 188 |
+
|
| 189 |
+
# Dups on left not allowed by one_to_one constraint
|
| 190 |
+
msg = "Merge keys are not unique in left dataset; not a one-to-one merge"
|
| 191 |
+
with pytest.raises(MergeError, match=msg):
|
| 192 |
+
left_w_dups.join(
|
| 193 |
+
right_no_dup,
|
| 194 |
+
on="a",
|
| 195 |
+
validate="one_to_one",
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def test_join_on_single_col_dup_on_both(left_w_dups, right_w_dups):
|
| 200 |
+
# GH 46622
|
| 201 |
+
# Dups on both allowed by many_to_many constraint
|
| 202 |
+
left_w_dups.join(right_w_dups, on="a", validate="many_to_many")
|
| 203 |
+
|
| 204 |
+
# Dups on both not allowed by many_to_one constraint
|
| 205 |
+
msg = "Merge keys are not unique in right dataset; not a many-to-one merge"
|
| 206 |
+
with pytest.raises(MergeError, match=msg):
|
| 207 |
+
left_w_dups.join(
|
| 208 |
+
right_w_dups,
|
| 209 |
+
on="a",
|
| 210 |
+
validate="many_to_one",
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
# Dups on both not allowed by one_to_many constraint
|
| 214 |
+
msg = "Merge keys are not unique in left dataset; not a one-to-many merge"
|
| 215 |
+
with pytest.raises(MergeError, match=msg):
|
| 216 |
+
left_w_dups.join(
|
| 217 |
+
right_w_dups,
|
| 218 |
+
on="a",
|
| 219 |
+
validate="one_to_many",
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def test_join_on_multi_col_check_dup():
|
| 224 |
+
# GH 46622
|
| 225 |
+
# Two column join, dups in both, but jointly no dups
|
| 226 |
+
left = DataFrame(
|
| 227 |
+
{
|
| 228 |
+
"a": ["a", "a", "b", "b"],
|
| 229 |
+
"b": [0, 1, 0, 1],
|
| 230 |
+
"c": ["cat", "dog", "weasel", "horse"],
|
| 231 |
+
},
|
| 232 |
+
index=range(4),
|
| 233 |
+
).set_index(["a", "b"])
|
| 234 |
+
|
| 235 |
+
right = DataFrame(
|
| 236 |
+
{
|
| 237 |
+
"a": ["a", "a", "b"],
|
| 238 |
+
"b": [0, 1, 0],
|
| 239 |
+
"d": ["meow", "bark", "um... weasel noise?"],
|
| 240 |
+
},
|
| 241 |
+
index=range(3),
|
| 242 |
+
).set_index(["a", "b"])
|
| 243 |
+
|
| 244 |
+
expected_multi = DataFrame(
|
| 245 |
+
{
|
| 246 |
+
"a": ["a", "a", "b"],
|
| 247 |
+
"b": [0, 1, 0],
|
| 248 |
+
"c": ["cat", "dog", "weasel"],
|
| 249 |
+
"d": ["meow", "bark", "um... weasel noise?"],
|
| 250 |
+
},
|
| 251 |
+
index=range(3),
|
| 252 |
+
).set_index(["a", "b"])
|
| 253 |
+
|
| 254 |
+
# Jointly no dups allowed by one_to_one constraint
|
| 255 |
+
result = left.join(right, how="inner", validate="1:1")
|
| 256 |
+
tm.assert_frame_equal(result, expected_multi)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def test_join_index(float_frame):
|
| 260 |
+
# left / right
|
| 261 |
+
|
| 262 |
+
f = float_frame.loc[float_frame.index[:10], ["A", "B"]]
|
| 263 |
+
f2 = float_frame.loc[float_frame.index[5:], ["C", "D"]].iloc[::-1]
|
| 264 |
+
|
| 265 |
+
joined = f.join(f2)
|
| 266 |
+
tm.assert_index_equal(f.index, joined.index)
|
| 267 |
+
expected_columns = Index(["A", "B", "C", "D"])
|
| 268 |
+
tm.assert_index_equal(joined.columns, expected_columns)
|
| 269 |
+
|
| 270 |
+
joined = f.join(f2, how="left")
|
| 271 |
+
tm.assert_index_equal(joined.index, f.index)
|
| 272 |
+
tm.assert_index_equal(joined.columns, expected_columns)
|
| 273 |
+
|
| 274 |
+
joined = f.join(f2, how="right")
|
| 275 |
+
tm.assert_index_equal(joined.index, f2.index)
|
| 276 |
+
tm.assert_index_equal(joined.columns, expected_columns)
|
| 277 |
+
|
| 278 |
+
# inner
|
| 279 |
+
|
| 280 |
+
joined = f.join(f2, how="inner")
|
| 281 |
+
tm.assert_index_equal(joined.index, f.index[5:10])
|
| 282 |
+
tm.assert_index_equal(joined.columns, expected_columns)
|
| 283 |
+
|
| 284 |
+
# outer
|
| 285 |
+
|
| 286 |
+
joined = f.join(f2, how="outer")
|
| 287 |
+
tm.assert_index_equal(joined.index, float_frame.index.sort_values())
|
| 288 |
+
tm.assert_index_equal(joined.columns, expected_columns)
|
| 289 |
+
|
| 290 |
+
with pytest.raises(ValueError, match="join method"):
|
| 291 |
+
f.join(f2, how="foo")
|
| 292 |
+
|
| 293 |
+
# corner case - overlapping columns
|
| 294 |
+
msg = "columns overlap but no suffix"
|
| 295 |
+
for how in ("outer", "left", "inner"):
|
| 296 |
+
with pytest.raises(ValueError, match=msg):
|
| 297 |
+
float_frame.join(float_frame, how=how)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def test_join_index_more(float_frame):
|
| 301 |
+
af = float_frame.loc[:, ["A", "B"]]
|
| 302 |
+
bf = float_frame.loc[::2, ["C", "D"]]
|
| 303 |
+
|
| 304 |
+
expected = af.copy()
|
| 305 |
+
expected["C"] = float_frame["C"][::2]
|
| 306 |
+
expected["D"] = float_frame["D"][::2]
|
| 307 |
+
|
| 308 |
+
result = af.join(bf)
|
| 309 |
+
tm.assert_frame_equal(result, expected)
|
| 310 |
+
|
| 311 |
+
result = af.join(bf, how="right")
|
| 312 |
+
tm.assert_frame_equal(result, expected[::2])
|
| 313 |
+
|
| 314 |
+
result = bf.join(af, how="right")
|
| 315 |
+
tm.assert_frame_equal(result, expected.loc[:, result.columns])
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def test_join_index_series(float_frame):
|
| 319 |
+
df = float_frame.copy()
|
| 320 |
+
ser = df.pop(float_frame.columns[-1])
|
| 321 |
+
joined = df.join(ser)
|
| 322 |
+
|
| 323 |
+
tm.assert_frame_equal(joined, float_frame)
|
| 324 |
+
|
| 325 |
+
ser.name = None
|
| 326 |
+
with pytest.raises(ValueError, match="must have a name"):
|
| 327 |
+
df.join(ser)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def test_join_overlap(float_frame):
|
| 331 |
+
df1 = float_frame.loc[:, ["A", "B", "C"]]
|
| 332 |
+
df2 = float_frame.loc[:, ["B", "C", "D"]]
|
| 333 |
+
|
| 334 |
+
joined = df1.join(df2, lsuffix="_df1", rsuffix="_df2")
|
| 335 |
+
df1_suf = df1.loc[:, ["B", "C"]].add_suffix("_df1")
|
| 336 |
+
df2_suf = df2.loc[:, ["B", "C"]].add_suffix("_df2")
|
| 337 |
+
|
| 338 |
+
no_overlap = float_frame.loc[:, ["A", "D"]]
|
| 339 |
+
expected = df1_suf.join(df2_suf).join(no_overlap)
|
| 340 |
+
|
| 341 |
+
# column order not necessarily sorted
|
| 342 |
+
tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def test_join_period_index(frame_with_period_index):
|
| 346 |
+
other = frame_with_period_index.rename(columns=lambda key: f"{key}{key}")
|
| 347 |
+
|
| 348 |
+
joined_values = np.concatenate([frame_with_period_index.values] * 2, axis=1)
|
| 349 |
+
|
| 350 |
+
joined_cols = frame_with_period_index.columns.append(other.columns)
|
| 351 |
+
|
| 352 |
+
joined = frame_with_period_index.join(other)
|
| 353 |
+
expected = DataFrame(
|
| 354 |
+
data=joined_values, columns=joined_cols, index=frame_with_period_index.index
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
tm.assert_frame_equal(joined, expected)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def test_join_left_sequence_non_unique_index():
|
| 361 |
+
# https://github.com/pandas-dev/pandas/issues/19607
|
| 362 |
+
df1 = DataFrame({"a": [0, 10, 20]}, index=[1, 2, 3])
|
| 363 |
+
df2 = DataFrame({"b": [100, 200, 300]}, index=[4, 3, 2])
|
| 364 |
+
df3 = DataFrame({"c": [400, 500, 600]}, index=[2, 2, 4])
|
| 365 |
+
|
| 366 |
+
joined = df1.join([df2, df3], how="left")
|
| 367 |
+
|
| 368 |
+
expected = DataFrame(
|
| 369 |
+
{
|
| 370 |
+
"a": [0, 10, 10, 20],
|
| 371 |
+
"b": [np.nan, 300, 300, 200],
|
| 372 |
+
"c": [np.nan, 400, 500, np.nan],
|
| 373 |
+
},
|
| 374 |
+
index=[1, 2, 2, 3],
|
| 375 |
+
)
|
| 376 |
+
|
| 377 |
+
tm.assert_frame_equal(joined, expected)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def test_join_list_series(float_frame):
|
| 381 |
+
# GH#46850
|
| 382 |
+
# Join a DataFrame with a list containing both a Series and a DataFrame
|
| 383 |
+
left = float_frame.A.to_frame()
|
| 384 |
+
right = [float_frame.B, float_frame[["C", "D"]]]
|
| 385 |
+
result = left.join(right)
|
| 386 |
+
tm.assert_frame_equal(result, float_frame)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
@pytest.mark.parametrize("sort_kw", [True, False])
|
| 390 |
+
def test_suppress_future_warning_with_sort_kw(sort_kw):
|
| 391 |
+
a = DataFrame({"col1": [1, 2]}, index=["c", "a"])
|
| 392 |
+
|
| 393 |
+
b = DataFrame({"col2": [4, 5]}, index=["b", "a"])
|
| 394 |
+
|
| 395 |
+
c = DataFrame({"col3": [7, 8]}, index=["a", "b"])
|
| 396 |
+
|
| 397 |
+
expected = DataFrame(
|
| 398 |
+
{
|
| 399 |
+
"col1": {"a": 2.0, "b": float("nan"), "c": 1.0},
|
| 400 |
+
"col2": {"a": 5.0, "b": 4.0, "c": float("nan")},
|
| 401 |
+
"col3": {"a": 7.0, "b": 8.0, "c": float("nan")},
|
| 402 |
+
}
|
| 403 |
+
)
|
| 404 |
+
if sort_kw is False:
|
| 405 |
+
expected = expected.reindex(index=["c", "a", "b"])
|
| 406 |
+
|
| 407 |
+
with tm.assert_produces_warning(None):
|
| 408 |
+
result = a.join([b, c], how="outer", sort=sort_kw)
|
| 409 |
+
tm.assert_frame_equal(result, expected)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class TestDataFrameJoin:
|
| 413 |
+
def test_join(self, multiindex_dataframe_random_data):
|
| 414 |
+
frame = multiindex_dataframe_random_data
|
| 415 |
+
|
| 416 |
+
a = frame.loc[frame.index[:5], ["A"]]
|
| 417 |
+
b = frame.loc[frame.index[2:], ["B", "C"]]
|
| 418 |
+
|
| 419 |
+
joined = a.join(b, how="outer").reindex(frame.index)
|
| 420 |
+
expected = frame.copy().values.copy()
|
| 421 |
+
expected[np.isnan(joined.values)] = np.nan
|
| 422 |
+
expected = DataFrame(expected, index=frame.index, columns=frame.columns)
|
| 423 |
+
|
| 424 |
+
assert not np.isnan(joined.values).all()
|
| 425 |
+
|
| 426 |
+
tm.assert_frame_equal(joined, expected)
|
| 427 |
+
|
| 428 |
+
def test_join_segfault(self):
|
| 429 |
+
# GH#1532
|
| 430 |
+
df1 = DataFrame({"a": [1, 1], "b": [1, 2], "x": [1, 2]})
|
| 431 |
+
df2 = DataFrame({"a": [2, 2], "b": [1, 2], "y": [1, 2]})
|
| 432 |
+
df1 = df1.set_index(["a", "b"])
|
| 433 |
+
df2 = df2.set_index(["a", "b"])
|
| 434 |
+
# it works!
|
| 435 |
+
for how in ["left", "right", "outer"]:
|
| 436 |
+
df1.join(df2, how=how)
|
| 437 |
+
|
| 438 |
+
def test_join_str_datetime(self):
|
| 439 |
+
str_dates = ["20120209", "20120222"]
|
| 440 |
+
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
|
| 441 |
+
|
| 442 |
+
A = DataFrame(str_dates, index=range(2), columns=["aa"])
|
| 443 |
+
C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
|
| 444 |
+
|
| 445 |
+
tst = A.join(C, on="aa")
|
| 446 |
+
|
| 447 |
+
assert len(tst.columns) == 3
|
| 448 |
+
|
| 449 |
+
def test_join_multiindex_leftright(self):
|
| 450 |
+
# GH 10741
|
| 451 |
+
df1 = DataFrame(
|
| 452 |
+
[
|
| 453 |
+
["a", "x", 0.471780],
|
| 454 |
+
["a", "y", 0.774908],
|
| 455 |
+
["a", "z", 0.563634],
|
| 456 |
+
["b", "x", -0.353756],
|
| 457 |
+
["b", "y", 0.368062],
|
| 458 |
+
["b", "z", -1.721840],
|
| 459 |
+
["c", "x", 1],
|
| 460 |
+
["c", "y", 2],
|
| 461 |
+
["c", "z", 3],
|
| 462 |
+
],
|
| 463 |
+
columns=["first", "second", "value1"],
|
| 464 |
+
).set_index(["first", "second"])
|
| 465 |
+
|
| 466 |
+
df2 = DataFrame([["a", 10], ["b", 20]], columns=["first", "value2"]).set_index(
|
| 467 |
+
["first"]
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
exp = DataFrame(
|
| 471 |
+
[
|
| 472 |
+
[0.471780, 10],
|
| 473 |
+
[0.774908, 10],
|
| 474 |
+
[0.563634, 10],
|
| 475 |
+
[-0.353756, 20],
|
| 476 |
+
[0.368062, 20],
|
| 477 |
+
[-1.721840, 20],
|
| 478 |
+
[1.000000, np.nan],
|
| 479 |
+
[2.000000, np.nan],
|
| 480 |
+
[3.000000, np.nan],
|
| 481 |
+
],
|
| 482 |
+
index=df1.index,
|
| 483 |
+
columns=["value1", "value2"],
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
# these must be the same results (but columns are flipped)
|
| 487 |
+
tm.assert_frame_equal(df1.join(df2, how="left"), exp)
|
| 488 |
+
tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]])
|
| 489 |
+
|
| 490 |
+
exp_idx = MultiIndex.from_product(
|
| 491 |
+
[["a", "b"], ["x", "y", "z"]], names=["first", "second"]
|
| 492 |
+
)
|
| 493 |
+
exp = DataFrame(
|
| 494 |
+
[
|
| 495 |
+
[0.471780, 10],
|
| 496 |
+
[0.774908, 10],
|
| 497 |
+
[0.563634, 10],
|
| 498 |
+
[-0.353756, 20],
|
| 499 |
+
[0.368062, 20],
|
| 500 |
+
[-1.721840, 20],
|
| 501 |
+
],
|
| 502 |
+
index=exp_idx,
|
| 503 |
+
columns=["value1", "value2"],
|
| 504 |
+
)
|
| 505 |
+
|
| 506 |
+
tm.assert_frame_equal(df1.join(df2, how="right"), exp)
|
| 507 |
+
tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
|
| 508 |
+
|
| 509 |
+
def test_join_multiindex_dates(self):
|
| 510 |
+
# GH 33692
|
| 511 |
+
date = pd.Timestamp(2000, 1, 1).date()
|
| 512 |
+
|
| 513 |
+
df1_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
|
| 514 |
+
df1 = DataFrame({"col1": [0]}, index=df1_index)
|
| 515 |
+
df2_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
|
| 516 |
+
df2 = DataFrame({"col2": [0]}, index=df2_index)
|
| 517 |
+
df3_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
|
| 518 |
+
df3 = DataFrame({"col3": [0]}, index=df3_index)
|
| 519 |
+
|
| 520 |
+
result = df1.join([df2, df3])
|
| 521 |
+
|
| 522 |
+
expected_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
|
| 523 |
+
expected = DataFrame(
|
| 524 |
+
{"col1": [0], "col2": [0], "col3": [0]}, index=expected_index
|
| 525 |
+
)
|
| 526 |
+
|
| 527 |
+
tm.assert_equal(result, expected)
|
| 528 |
+
|
| 529 |
+
def test_merge_join_different_levels_raises(self):
|
| 530 |
+
# GH#9455
|
| 531 |
+
# GH 40993: For raising, enforced in 2.0
|
| 532 |
+
|
| 533 |
+
# first dataframe
|
| 534 |
+
df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]])
|
| 535 |
+
|
| 536 |
+
# second dataframe
|
| 537 |
+
columns = MultiIndex.from_tuples([("a", ""), ("c", "c1")])
|
| 538 |
+
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
|
| 539 |
+
|
| 540 |
+
# merge
|
| 541 |
+
with pytest.raises(
|
| 542 |
+
MergeError, match="Not allowed to merge between different levels"
|
| 543 |
+
):
|
| 544 |
+
pd.merge(df1, df2, on="a")
|
| 545 |
+
|
| 546 |
+
# join, see discussion in GH#12219
|
| 547 |
+
with pytest.raises(
|
| 548 |
+
MergeError, match="Not allowed to merge between different levels"
|
| 549 |
+
):
|
| 550 |
+
df1.join(df2, on="a")
|
| 551 |
+
|
| 552 |
+
def test_frame_join_tzaware(self):
|
| 553 |
+
test1 = DataFrame(
|
| 554 |
+
np.zeros((6, 3)),
|
| 555 |
+
index=date_range(
|
| 556 |
+
"2012-11-15 00:00:00", periods=6, freq="100L", tz="US/Central"
|
| 557 |
+
),
|
| 558 |
+
)
|
| 559 |
+
test2 = DataFrame(
|
| 560 |
+
np.zeros((3, 3)),
|
| 561 |
+
index=date_range(
|
| 562 |
+
"2012-11-15 00:00:00", periods=3, freq="250L", tz="US/Central"
|
| 563 |
+
),
|
| 564 |
+
columns=range(3, 6),
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
result = test1.join(test2, how="outer")
|
| 568 |
+
expected = test1.index.union(test2.index)
|
| 569 |
+
|
| 570 |
+
tm.assert_index_equal(result.index, expected)
|
| 571 |
+
assert result.index.tz.zone == "US/Central"
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_matmul.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
Index,
|
| 9 |
+
Series,
|
| 10 |
+
)
|
| 11 |
+
import pandas._testing as tm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestMatMul:
|
| 15 |
+
def test_matmul(self):
|
| 16 |
+
# matmul test is for GH#10259
|
| 17 |
+
a = DataFrame(
|
| 18 |
+
np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
|
| 19 |
+
)
|
| 20 |
+
b = DataFrame(
|
| 21 |
+
np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"]
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
# DataFrame @ DataFrame
|
| 25 |
+
result = operator.matmul(a, b)
|
| 26 |
+
expected = DataFrame(
|
| 27 |
+
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
|
| 28 |
+
)
|
| 29 |
+
tm.assert_frame_equal(result, expected)
|
| 30 |
+
|
| 31 |
+
# DataFrame @ Series
|
| 32 |
+
result = operator.matmul(a, b.one)
|
| 33 |
+
expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"])
|
| 34 |
+
tm.assert_series_equal(result, expected)
|
| 35 |
+
|
| 36 |
+
# np.array @ DataFrame
|
| 37 |
+
result = operator.matmul(a.values, b)
|
| 38 |
+
assert isinstance(result, DataFrame)
|
| 39 |
+
assert result.columns.equals(b.columns)
|
| 40 |
+
assert result.index.equals(Index(range(3)))
|
| 41 |
+
expected = np.dot(a.values, b.values)
|
| 42 |
+
tm.assert_almost_equal(result.values, expected)
|
| 43 |
+
|
| 44 |
+
# nested list @ DataFrame (__rmatmul__)
|
| 45 |
+
result = operator.matmul(a.values.tolist(), b)
|
| 46 |
+
expected = DataFrame(
|
| 47 |
+
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
|
| 48 |
+
)
|
| 49 |
+
tm.assert_almost_equal(result.values, expected.values)
|
| 50 |
+
|
| 51 |
+
# mixed dtype DataFrame @ DataFrame
|
| 52 |
+
a["q"] = a.q.round().astype(int)
|
| 53 |
+
result = operator.matmul(a, b)
|
| 54 |
+
expected = DataFrame(
|
| 55 |
+
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
|
| 56 |
+
)
|
| 57 |
+
tm.assert_frame_equal(result, expected)
|
| 58 |
+
|
| 59 |
+
# different dtypes DataFrame @ DataFrame
|
| 60 |
+
a = a.astype(int)
|
| 61 |
+
result = operator.matmul(a, b)
|
| 62 |
+
expected = DataFrame(
|
| 63 |
+
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
|
| 64 |
+
)
|
| 65 |
+
tm.assert_frame_equal(result, expected)
|
| 66 |
+
|
| 67 |
+
# unaligned
|
| 68 |
+
df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4))
|
| 69 |
+
df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3])
|
| 70 |
+
|
| 71 |
+
with pytest.raises(ValueError, match="aligned"):
|
| 72 |
+
operator.matmul(df, df2)
|
| 73 |
+
|
| 74 |
+
def test_matmul_message_shapes(self):
|
| 75 |
+
# GH#21581 exception message should reflect original shapes,
|
| 76 |
+
# not transposed shapes
|
| 77 |
+
a = np.random.rand(10, 4)
|
| 78 |
+
b = np.random.rand(5, 3)
|
| 79 |
+
|
| 80 |
+
df = DataFrame(b)
|
| 81 |
+
|
| 82 |
+
msg = r"shapes \(10, 4\) and \(5, 3\) not aligned"
|
| 83 |
+
with pytest.raises(ValueError, match=msg):
|
| 84 |
+
a @ df
|
| 85 |
+
with pytest.raises(ValueError, match=msg):
|
| 86 |
+
a.tolist() @ df
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_pct_change.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
Series,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestDataFramePctChange:
|
| 12 |
+
@pytest.mark.parametrize(
|
| 13 |
+
"periods,fill_method,limit,exp",
|
| 14 |
+
[
|
| 15 |
+
(1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
|
| 16 |
+
(1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
|
| 17 |
+
(1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
|
| 18 |
+
(1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
|
| 19 |
+
(-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
|
| 20 |
+
(-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
|
| 21 |
+
(-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
|
| 22 |
+
(-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
|
| 23 |
+
],
|
| 24 |
+
)
|
| 25 |
+
def test_pct_change_with_nas(
|
| 26 |
+
self, periods, fill_method, limit, exp, frame_or_series
|
| 27 |
+
):
|
| 28 |
+
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
|
| 29 |
+
obj = frame_or_series(vals)
|
| 30 |
+
|
| 31 |
+
res = obj.pct_change(periods=periods, fill_method=fill_method, limit=limit)
|
| 32 |
+
tm.assert_equal(res, frame_or_series(exp))
|
| 33 |
+
|
| 34 |
+
def test_pct_change_numeric(self):
|
| 35 |
+
# GH#11150
|
| 36 |
+
pnl = DataFrame(
|
| 37 |
+
[np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(0, 40, 10)]
|
| 38 |
+
).astype(np.float64)
|
| 39 |
+
pnl.iat[1, 0] = np.nan
|
| 40 |
+
pnl.iat[1, 1] = np.nan
|
| 41 |
+
pnl.iat[2, 3] = 60
|
| 42 |
+
|
| 43 |
+
for axis in range(2):
|
| 44 |
+
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(axis=axis) - 1
|
| 45 |
+
result = pnl.pct_change(axis=axis, fill_method="pad")
|
| 46 |
+
|
| 47 |
+
tm.assert_frame_equal(result, expected)
|
| 48 |
+
|
| 49 |
+
def test_pct_change(self, datetime_frame):
|
| 50 |
+
rs = datetime_frame.pct_change(fill_method=None)
|
| 51 |
+
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
|
| 52 |
+
|
| 53 |
+
rs = datetime_frame.pct_change(2)
|
| 54 |
+
filled = datetime_frame.fillna(method="pad")
|
| 55 |
+
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
|
| 56 |
+
|
| 57 |
+
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
|
| 58 |
+
filled = datetime_frame.fillna(method="bfill", limit=1)
|
| 59 |
+
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
|
| 60 |
+
|
| 61 |
+
rs = datetime_frame.pct_change(freq="5D")
|
| 62 |
+
filled = datetime_frame.fillna(method="pad")
|
| 63 |
+
tm.assert_frame_equal(
|
| 64 |
+
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
def test_pct_change_shift_over_nas(self):
|
| 68 |
+
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
|
| 69 |
+
|
| 70 |
+
df = DataFrame({"a": s, "b": s})
|
| 71 |
+
|
| 72 |
+
chg = df.pct_change()
|
| 73 |
+
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
|
| 74 |
+
edf = DataFrame({"a": expected, "b": expected})
|
| 75 |
+
tm.assert_frame_equal(chg, edf)
|
| 76 |
+
|
| 77 |
+
@pytest.mark.parametrize(
|
| 78 |
+
"freq, periods, fill_method, limit",
|
| 79 |
+
[
|
| 80 |
+
("5B", 5, None, None),
|
| 81 |
+
("3B", 3, None, None),
|
| 82 |
+
("3B", 3, "bfill", None),
|
| 83 |
+
("7B", 7, "pad", 1),
|
| 84 |
+
("7B", 7, "bfill", 3),
|
| 85 |
+
("14B", 14, None, None),
|
| 86 |
+
],
|
| 87 |
+
)
|
| 88 |
+
def test_pct_change_periods_freq(
|
| 89 |
+
self, datetime_frame, freq, periods, fill_method, limit
|
| 90 |
+
):
|
| 91 |
+
# GH#7292
|
| 92 |
+
rs_freq = datetime_frame.pct_change(
|
| 93 |
+
freq=freq, fill_method=fill_method, limit=limit
|
| 94 |
+
)
|
| 95 |
+
rs_periods = datetime_frame.pct_change(
|
| 96 |
+
periods, fill_method=fill_method, limit=limit
|
| 97 |
+
)
|
| 98 |
+
tm.assert_frame_equal(rs_freq, rs_periods)
|
| 99 |
+
|
| 100 |
+
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
|
| 101 |
+
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
|
| 102 |
+
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
|
| 103 |
+
tm.assert_frame_equal(rs_freq, rs_periods)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@pytest.mark.parametrize("fill_method", ["pad", "ffill", None])
|
| 107 |
+
def test_pct_change_with_duplicated_indices(fill_method):
|
| 108 |
+
# GH30463
|
| 109 |
+
data = DataFrame(
|
| 110 |
+
{0: [np.nan, 1, 2, 3, 9, 18], 1: [0, 1, np.nan, 3, 9, 18]}, index=["a", "b"] * 3
|
| 111 |
+
)
|
| 112 |
+
result = data.pct_change(fill_method=fill_method)
|
| 113 |
+
if fill_method is None:
|
| 114 |
+
second_column = [np.nan, np.inf, np.nan, np.nan, 2.0, 1.0]
|
| 115 |
+
else:
|
| 116 |
+
second_column = [np.nan, np.inf, 0.0, 2.0, 2.0, 1.0]
|
| 117 |
+
expected = DataFrame(
|
| 118 |
+
{0: [np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], 1: second_column},
|
| 119 |
+
index=["a", "b"] * 3,
|
| 120 |
+
)
|
| 121 |
+
tm.assert_frame_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_pipe.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
DataFrame,
|
| 5 |
+
Series,
|
| 6 |
+
)
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestPipe:
|
| 11 |
+
def test_pipe(self, frame_or_series):
|
| 12 |
+
obj = DataFrame({"A": [1, 2, 3]})
|
| 13 |
+
expected = DataFrame({"A": [1, 4, 9]})
|
| 14 |
+
if frame_or_series is Series:
|
| 15 |
+
obj = obj["A"]
|
| 16 |
+
expected = expected["A"]
|
| 17 |
+
|
| 18 |
+
f = lambda x, y: x**y
|
| 19 |
+
result = obj.pipe(f, 2)
|
| 20 |
+
tm.assert_equal(result, expected)
|
| 21 |
+
|
| 22 |
+
def test_pipe_tuple(self, frame_or_series):
|
| 23 |
+
obj = DataFrame({"A": [1, 2, 3]})
|
| 24 |
+
obj = tm.get_obj(obj, frame_or_series)
|
| 25 |
+
|
| 26 |
+
f = lambda x, y: y
|
| 27 |
+
result = obj.pipe((f, "y"), 0)
|
| 28 |
+
tm.assert_equal(result, obj)
|
| 29 |
+
|
| 30 |
+
def test_pipe_tuple_error(self, frame_or_series):
|
| 31 |
+
obj = DataFrame({"A": [1, 2, 3]})
|
| 32 |
+
obj = tm.get_obj(obj, frame_or_series)
|
| 33 |
+
|
| 34 |
+
f = lambda x, y: y
|
| 35 |
+
|
| 36 |
+
msg = "y is both the pipe target and a keyword argument"
|
| 37 |
+
|
| 38 |
+
with pytest.raises(ValueError, match=msg):
|
| 39 |
+
obj.pipe((f, "y"), x=1, y=0)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_pop.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from pandas import (
|
| 4 |
+
DataFrame,
|
| 5 |
+
MultiIndex,
|
| 6 |
+
Series,
|
| 7 |
+
)
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestDataFramePop:
|
| 12 |
+
def test_pop(self, float_frame):
|
| 13 |
+
float_frame.columns.name = "baz"
|
| 14 |
+
|
| 15 |
+
float_frame.pop("A")
|
| 16 |
+
assert "A" not in float_frame
|
| 17 |
+
|
| 18 |
+
float_frame["foo"] = "bar"
|
| 19 |
+
float_frame.pop("foo")
|
| 20 |
+
assert "foo" not in float_frame
|
| 21 |
+
assert float_frame.columns.name == "baz"
|
| 22 |
+
|
| 23 |
+
# gh-10912: inplace ops cause caching issue
|
| 24 |
+
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"])
|
| 25 |
+
b = a.pop("B")
|
| 26 |
+
b += 1
|
| 27 |
+
|
| 28 |
+
# original frame
|
| 29 |
+
expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"])
|
| 30 |
+
tm.assert_frame_equal(a, expected)
|
| 31 |
+
|
| 32 |
+
# result
|
| 33 |
+
expected = Series([2, 5], index=["X", "Y"], name="B") + 1
|
| 34 |
+
tm.assert_series_equal(b, expected)
|
| 35 |
+
|
| 36 |
+
def test_pop_non_unique_cols(self):
|
| 37 |
+
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
|
| 38 |
+
df.columns = ["a", "b", "a"]
|
| 39 |
+
|
| 40 |
+
res = df.pop("a")
|
| 41 |
+
assert type(res) == DataFrame
|
| 42 |
+
assert len(res) == 2
|
| 43 |
+
assert len(df.columns) == 1
|
| 44 |
+
assert "b" in df.columns
|
| 45 |
+
assert "a" not in df.columns
|
| 46 |
+
assert len(df.index) == 2
|
| 47 |
+
|
| 48 |
+
def test_mixed_depth_pop(self):
|
| 49 |
+
arrays = [
|
| 50 |
+
["a", "top", "top", "routine1", "routine1", "routine2"],
|
| 51 |
+
["", "OD", "OD", "result1", "result2", "result1"],
|
| 52 |
+
["", "wx", "wy", "", "", ""],
|
| 53 |
+
]
|
| 54 |
+
|
| 55 |
+
tuples = sorted(zip(*arrays))
|
| 56 |
+
index = MultiIndex.from_tuples(tuples)
|
| 57 |
+
df = DataFrame(np.random.randn(4, 6), columns=index)
|
| 58 |
+
|
| 59 |
+
df1 = df.copy()
|
| 60 |
+
df2 = df.copy()
|
| 61 |
+
result = df1.pop("a")
|
| 62 |
+
expected = df2.pop(("a", "", ""))
|
| 63 |
+
tm.assert_series_equal(expected, result, check_names=False)
|
| 64 |
+
tm.assert_frame_equal(df1, df2)
|
| 65 |
+
assert result.name == "a"
|
| 66 |
+
|
| 67 |
+
expected = df1["top"]
|
| 68 |
+
df1 = df1.drop(["top"], axis=1)
|
| 69 |
+
result = df2.pop("top")
|
| 70 |
+
tm.assert_frame_equal(expected, result)
|
| 71 |
+
tm.assert_frame_equal(df1, df2)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_quantile.py
ADDED
|
@@ -0,0 +1,989 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas.compat.numpy import (
|
| 5 |
+
np_percentile_argname,
|
| 6 |
+
np_version_under1p21,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from pandas import (
|
| 11 |
+
DataFrame,
|
| 12 |
+
Index,
|
| 13 |
+
Series,
|
| 14 |
+
Timestamp,
|
| 15 |
+
)
|
| 16 |
+
import pandas._testing as tm
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@pytest.fixture(
|
| 20 |
+
params=[["linear", "single"], ["nearest", "table"]], ids=lambda x: "-".join(x)
|
| 21 |
+
)
|
| 22 |
+
def interp_method(request):
|
| 23 |
+
"""(interpolation, method) arguments for quantile"""
|
| 24 |
+
return request.param
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class TestDataFrameQuantile:
|
| 28 |
+
@pytest.mark.parametrize(
|
| 29 |
+
"df,expected",
|
| 30 |
+
[
|
| 31 |
+
[
|
| 32 |
+
DataFrame(
|
| 33 |
+
{
|
| 34 |
+
0: Series(pd.arrays.SparseArray([1, 2])),
|
| 35 |
+
1: Series(pd.arrays.SparseArray([3, 4])),
|
| 36 |
+
}
|
| 37 |
+
),
|
| 38 |
+
Series([1.5, 3.5], name=0.5),
|
| 39 |
+
],
|
| 40 |
+
[
|
| 41 |
+
DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")),
|
| 42 |
+
Series([1.0], name=0.5),
|
| 43 |
+
],
|
| 44 |
+
],
|
| 45 |
+
)
|
| 46 |
+
def test_quantile_sparse(self, df, expected):
|
| 47 |
+
# GH#17198
|
| 48 |
+
# GH#24600
|
| 49 |
+
result = df.quantile()
|
| 50 |
+
expected = expected.astype("Sparse[float]")
|
| 51 |
+
tm.assert_series_equal(result, expected)
|
| 52 |
+
|
| 53 |
+
def test_quantile(
|
| 54 |
+
self, datetime_frame, interp_method, using_array_manager, request
|
| 55 |
+
):
|
| 56 |
+
interpolation, method = interp_method
|
| 57 |
+
df = datetime_frame
|
| 58 |
+
result = df.quantile(
|
| 59 |
+
0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method
|
| 60 |
+
)
|
| 61 |
+
expected = Series(
|
| 62 |
+
[np.percentile(df[col], 10) for col in df.columns],
|
| 63 |
+
index=df.columns,
|
| 64 |
+
name=0.1,
|
| 65 |
+
)
|
| 66 |
+
if interpolation == "linear":
|
| 67 |
+
# np.percentile values only comparable to linear interpolation
|
| 68 |
+
tm.assert_series_equal(result, expected)
|
| 69 |
+
else:
|
| 70 |
+
tm.assert_index_equal(result.index, expected.index)
|
| 71 |
+
request.node.add_marker(
|
| 72 |
+
pytest.mark.xfail(
|
| 73 |
+
using_array_manager, reason="Name set incorrectly for arraymanager"
|
| 74 |
+
)
|
| 75 |
+
)
|
| 76 |
+
assert result.name == expected.name
|
| 77 |
+
|
| 78 |
+
result = df.quantile(
|
| 79 |
+
0.9, axis=1, numeric_only=True, interpolation=interpolation, method=method
|
| 80 |
+
)
|
| 81 |
+
expected = Series(
|
| 82 |
+
[np.percentile(df.loc[date], 90) for date in df.index],
|
| 83 |
+
index=df.index,
|
| 84 |
+
name=0.9,
|
| 85 |
+
)
|
| 86 |
+
if interpolation == "linear":
|
| 87 |
+
# np.percentile values only comparable to linear interpolation
|
| 88 |
+
tm.assert_series_equal(result, expected)
|
| 89 |
+
else:
|
| 90 |
+
tm.assert_index_equal(result.index, expected.index)
|
| 91 |
+
request.node.add_marker(
|
| 92 |
+
pytest.mark.xfail(
|
| 93 |
+
using_array_manager, reason="Name set incorrectly for arraymanager"
|
| 94 |
+
)
|
| 95 |
+
)
|
| 96 |
+
assert result.name == expected.name
|
| 97 |
+
|
| 98 |
+
def test_empty(self, interp_method):
|
| 99 |
+
interpolation, method = interp_method
|
| 100 |
+
q = DataFrame({"x": [], "y": []}).quantile(
|
| 101 |
+
0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method
|
| 102 |
+
)
|
| 103 |
+
assert np.isnan(q["x"]) and np.isnan(q["y"])
|
| 104 |
+
|
| 105 |
+
def test_non_numeric_exclusion(self, interp_method, request, using_array_manager):
|
| 106 |
+
interpolation, method = interp_method
|
| 107 |
+
df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]})
|
| 108 |
+
rs = df.quantile(
|
| 109 |
+
0.5, numeric_only=True, interpolation=interpolation, method=method
|
| 110 |
+
)
|
| 111 |
+
xp = df.median(numeric_only=True).rename(0.5)
|
| 112 |
+
if interpolation == "nearest":
|
| 113 |
+
xp = (xp + 0.5).astype(np.int64)
|
| 114 |
+
if method == "table" and using_array_manager:
|
| 115 |
+
request.node.add_marker(
|
| 116 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 117 |
+
)
|
| 118 |
+
tm.assert_series_equal(rs, xp)
|
| 119 |
+
|
| 120 |
+
def test_axis(self, interp_method, request, using_array_manager):
|
| 121 |
+
# axis
|
| 122 |
+
interpolation, method = interp_method
|
| 123 |
+
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
|
| 124 |
+
result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
|
| 125 |
+
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
|
| 126 |
+
if interpolation == "nearest":
|
| 127 |
+
expected = expected.astype(np.int64)
|
| 128 |
+
if method == "table" and using_array_manager:
|
| 129 |
+
request.node.add_marker(
|
| 130 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 131 |
+
)
|
| 132 |
+
tm.assert_series_equal(result, expected)
|
| 133 |
+
|
| 134 |
+
result = df.quantile(
|
| 135 |
+
[0.5, 0.75], axis=1, interpolation=interpolation, method=method
|
| 136 |
+
)
|
| 137 |
+
expected = DataFrame(
|
| 138 |
+
{1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75]
|
| 139 |
+
)
|
| 140 |
+
if interpolation == "nearest":
|
| 141 |
+
expected.iloc[0, :] -= 0.5
|
| 142 |
+
expected.iloc[1, :] += 0.25
|
| 143 |
+
expected = expected.astype(np.int64)
|
| 144 |
+
tm.assert_frame_equal(result, expected, check_index_type=True)
|
| 145 |
+
|
| 146 |
+
def test_axis_numeric_only_true(self, interp_method, request, using_array_manager):
|
| 147 |
+
# We may want to break API in the future to change this
|
| 148 |
+
# so that we exclude non-numeric along the same axis
|
| 149 |
+
# See GH #7312
|
| 150 |
+
interpolation, method = interp_method
|
| 151 |
+
df = DataFrame([[1, 2, 3], ["a", "b", 4]])
|
| 152 |
+
result = df.quantile(
|
| 153 |
+
0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method
|
| 154 |
+
)
|
| 155 |
+
expected = Series([3.0, 4.0], index=[0, 1], name=0.5)
|
| 156 |
+
if interpolation == "nearest":
|
| 157 |
+
expected = expected.astype(np.int64)
|
| 158 |
+
if method == "table" and using_array_manager:
|
| 159 |
+
request.node.add_marker(
|
| 160 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 161 |
+
)
|
| 162 |
+
tm.assert_series_equal(result, expected)
|
| 163 |
+
|
| 164 |
+
def test_quantile_date_range(self, interp_method, request, using_array_manager):
|
| 165 |
+
# GH 2460
|
| 166 |
+
interpolation, method = interp_method
|
| 167 |
+
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
|
| 168 |
+
ser = Series(dti)
|
| 169 |
+
df = DataFrame(ser)
|
| 170 |
+
|
| 171 |
+
result = df.quantile(
|
| 172 |
+
numeric_only=False, interpolation=interpolation, method=method
|
| 173 |
+
)
|
| 174 |
+
expected = Series(
|
| 175 |
+
["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]"
|
| 176 |
+
)
|
| 177 |
+
if method == "table" and using_array_manager:
|
| 178 |
+
request.node.add_marker(
|
| 179 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
tm.assert_series_equal(result, expected)
|
| 183 |
+
|
| 184 |
+
def test_quantile_axis_mixed(self, interp_method, request, using_array_manager):
|
| 185 |
+
# mixed on axis=1
|
| 186 |
+
interpolation, method = interp_method
|
| 187 |
+
df = DataFrame(
|
| 188 |
+
{
|
| 189 |
+
"A": [1, 2, 3],
|
| 190 |
+
"B": [2.0, 3.0, 4.0],
|
| 191 |
+
"C": pd.date_range("20130101", periods=3),
|
| 192 |
+
"D": ["foo", "bar", "baz"],
|
| 193 |
+
}
|
| 194 |
+
)
|
| 195 |
+
result = df.quantile(
|
| 196 |
+
0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method
|
| 197 |
+
)
|
| 198 |
+
expected = Series([1.5, 2.5, 3.5], name=0.5)
|
| 199 |
+
if interpolation == "nearest":
|
| 200 |
+
expected -= 0.5
|
| 201 |
+
if method == "table" and using_array_manager:
|
| 202 |
+
request.node.add_marker(
|
| 203 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 204 |
+
)
|
| 205 |
+
tm.assert_series_equal(result, expected)
|
| 206 |
+
|
| 207 |
+
# must raise
|
| 208 |
+
msg = "'<' not supported between instances of 'Timestamp' and 'float'"
|
| 209 |
+
with pytest.raises(TypeError, match=msg):
|
| 210 |
+
df.quantile(0.5, axis=1, numeric_only=False)
|
| 211 |
+
|
| 212 |
+
def test_quantile_axis_parameter(self, interp_method, request, using_array_manager):
|
| 213 |
+
# GH 9543/9544
|
| 214 |
+
interpolation, method = interp_method
|
| 215 |
+
if method == "table" and using_array_manager:
|
| 216 |
+
request.node.add_marker(
|
| 217 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 218 |
+
)
|
| 219 |
+
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
|
| 220 |
+
|
| 221 |
+
result = df.quantile(0.5, axis=0, interpolation=interpolation, method=method)
|
| 222 |
+
|
| 223 |
+
expected = Series([2.0, 3.0], index=["A", "B"], name=0.5)
|
| 224 |
+
if interpolation == "nearest":
|
| 225 |
+
expected = expected.astype(np.int64)
|
| 226 |
+
tm.assert_series_equal(result, expected)
|
| 227 |
+
|
| 228 |
+
expected = df.quantile(
|
| 229 |
+
0.5, axis="index", interpolation=interpolation, method=method
|
| 230 |
+
)
|
| 231 |
+
if interpolation == "nearest":
|
| 232 |
+
expected = expected.astype(np.int64)
|
| 233 |
+
tm.assert_series_equal(result, expected)
|
| 234 |
+
|
| 235 |
+
result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
|
| 236 |
+
|
| 237 |
+
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
|
| 238 |
+
if interpolation == "nearest":
|
| 239 |
+
expected = expected.astype(np.int64)
|
| 240 |
+
tm.assert_series_equal(result, expected)
|
| 241 |
+
|
| 242 |
+
result = df.quantile(
|
| 243 |
+
0.5, axis="columns", interpolation=interpolation, method=method
|
| 244 |
+
)
|
| 245 |
+
tm.assert_series_equal(result, expected)
|
| 246 |
+
|
| 247 |
+
msg = "No axis named -1 for object type DataFrame"
|
| 248 |
+
with pytest.raises(ValueError, match=msg):
|
| 249 |
+
df.quantile(0.1, axis=-1, interpolation=interpolation, method=method)
|
| 250 |
+
msg = "No axis named column for object type DataFrame"
|
| 251 |
+
with pytest.raises(ValueError, match=msg):
|
| 252 |
+
df.quantile(0.1, axis="column")
|
| 253 |
+
|
| 254 |
+
def test_quantile_interpolation(self):
|
| 255 |
+
# see gh-10174
|
| 256 |
+
|
| 257 |
+
# interpolation method other than default linear
|
| 258 |
+
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
|
| 259 |
+
result = df.quantile(0.5, axis=1, interpolation="nearest")
|
| 260 |
+
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
|
| 261 |
+
tm.assert_series_equal(result, expected)
|
| 262 |
+
|
| 263 |
+
# cross-check interpolation=nearest results in original dtype
|
| 264 |
+
exp = np.percentile(
|
| 265 |
+
np.array([[1, 2, 3], [2, 3, 4]]),
|
| 266 |
+
0.5,
|
| 267 |
+
axis=0,
|
| 268 |
+
**{np_percentile_argname: "nearest"},
|
| 269 |
+
)
|
| 270 |
+
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64")
|
| 271 |
+
tm.assert_series_equal(result, expected)
|
| 272 |
+
|
| 273 |
+
# float
|
| 274 |
+
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": [2.0, 3.0, 4.0]}, index=[1, 2, 3])
|
| 275 |
+
result = df.quantile(0.5, axis=1, interpolation="nearest")
|
| 276 |
+
expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5)
|
| 277 |
+
tm.assert_series_equal(result, expected)
|
| 278 |
+
exp = np.percentile(
|
| 279 |
+
np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]),
|
| 280 |
+
0.5,
|
| 281 |
+
axis=0,
|
| 282 |
+
**{np_percentile_argname: "nearest"},
|
| 283 |
+
)
|
| 284 |
+
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64")
|
| 285 |
+
tm.assert_series_equal(result, expected)
|
| 286 |
+
|
| 287 |
+
# axis
|
| 288 |
+
result = df.quantile([0.5, 0.75], axis=1, interpolation="lower")
|
| 289 |
+
expected = DataFrame(
|
| 290 |
+
{1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75]
|
| 291 |
+
)
|
| 292 |
+
tm.assert_frame_equal(result, expected)
|
| 293 |
+
|
| 294 |
+
# test degenerate case
|
| 295 |
+
df = DataFrame({"x": [], "y": []})
|
| 296 |
+
q = df.quantile(0.1, axis=0, interpolation="higher")
|
| 297 |
+
assert np.isnan(q["x"]) and np.isnan(q["y"])
|
| 298 |
+
|
| 299 |
+
# multi
|
| 300 |
+
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
|
| 301 |
+
result = df.quantile([0.25, 0.5], interpolation="midpoint")
|
| 302 |
+
|
| 303 |
+
# https://github.com/numpy/numpy/issues/7163
|
| 304 |
+
expected = DataFrame(
|
| 305 |
+
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
|
| 306 |
+
index=[0.25, 0.5],
|
| 307 |
+
columns=["a", "b", "c"],
|
| 308 |
+
)
|
| 309 |
+
tm.assert_frame_equal(result, expected)
|
| 310 |
+
|
| 311 |
+
def test_quantile_interpolation_datetime(self, datetime_frame):
|
| 312 |
+
# see gh-10174
|
| 313 |
+
|
| 314 |
+
# interpolation = linear (default case)
|
| 315 |
+
df = datetime_frame
|
| 316 |
+
q = df.quantile(0.1, axis=0, numeric_only=True, interpolation="linear")
|
| 317 |
+
assert q["A"] == np.percentile(df["A"], 10)
|
| 318 |
+
|
| 319 |
+
def test_quantile_interpolation_int(self, int_frame):
|
| 320 |
+
# see gh-10174
|
| 321 |
+
|
| 322 |
+
df = int_frame
|
| 323 |
+
# interpolation = linear (default case)
|
| 324 |
+
q = df.quantile(0.1)
|
| 325 |
+
assert q["A"] == np.percentile(df["A"], 10)
|
| 326 |
+
|
| 327 |
+
# test with and without interpolation keyword
|
| 328 |
+
q1 = df.quantile(0.1, axis=0, interpolation="linear")
|
| 329 |
+
assert q1["A"] == np.percentile(df["A"], 10)
|
| 330 |
+
tm.assert_series_equal(q, q1)
|
| 331 |
+
|
| 332 |
+
def test_quantile_multi(self, interp_method, request, using_array_manager):
|
| 333 |
+
interpolation, method = interp_method
|
| 334 |
+
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
|
| 335 |
+
result = df.quantile([0.25, 0.5], interpolation=interpolation, method=method)
|
| 336 |
+
expected = DataFrame(
|
| 337 |
+
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
|
| 338 |
+
index=[0.25, 0.5],
|
| 339 |
+
columns=["a", "b", "c"],
|
| 340 |
+
)
|
| 341 |
+
if interpolation == "nearest":
|
| 342 |
+
expected = expected.astype(np.int64)
|
| 343 |
+
if method == "table" and using_array_manager:
|
| 344 |
+
request.node.add_marker(
|
| 345 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 346 |
+
)
|
| 347 |
+
tm.assert_frame_equal(result, expected)
|
| 348 |
+
|
| 349 |
+
def test_quantile_multi_axis_1(self, interp_method, request, using_array_manager):
|
| 350 |
+
interpolation, method = interp_method
|
| 351 |
+
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
|
| 352 |
+
result = df.quantile(
|
| 353 |
+
[0.25, 0.5], axis=1, interpolation=interpolation, method=method
|
| 354 |
+
)
|
| 355 |
+
expected = DataFrame(
|
| 356 |
+
[[1.0, 2.0, 3.0]] * 2, index=[0.25, 0.5], columns=[0, 1, 2]
|
| 357 |
+
)
|
| 358 |
+
if interpolation == "nearest":
|
| 359 |
+
expected = expected.astype(np.int64)
|
| 360 |
+
if method == "table" and using_array_manager:
|
| 361 |
+
request.node.add_marker(
|
| 362 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 363 |
+
)
|
| 364 |
+
tm.assert_frame_equal(result, expected)
|
| 365 |
+
|
| 366 |
+
def test_quantile_multi_empty(self, interp_method):
|
| 367 |
+
interpolation, method = interp_method
|
| 368 |
+
result = DataFrame({"x": [], "y": []}).quantile(
|
| 369 |
+
[0.1, 0.9], axis=0, interpolation=interpolation, method=method
|
| 370 |
+
)
|
| 371 |
+
expected = DataFrame(
|
| 372 |
+
{"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9]
|
| 373 |
+
)
|
| 374 |
+
tm.assert_frame_equal(result, expected)
|
| 375 |
+
|
| 376 |
+
def test_quantile_datetime(self):
|
| 377 |
+
df = DataFrame({"a": pd.to_datetime(["2010", "2011"]), "b": [0, 5]})
|
| 378 |
+
|
| 379 |
+
# exclude datetime
|
| 380 |
+
result = df.quantile(0.5, numeric_only=True)
|
| 381 |
+
expected = Series([2.5], index=["b"], name=0.5)
|
| 382 |
+
tm.assert_series_equal(result, expected)
|
| 383 |
+
|
| 384 |
+
# datetime
|
| 385 |
+
result = df.quantile(0.5, numeric_only=False)
|
| 386 |
+
expected = Series(
|
| 387 |
+
[Timestamp("2010-07-02 12:00:00"), 2.5], index=["a", "b"], name=0.5
|
| 388 |
+
)
|
| 389 |
+
tm.assert_series_equal(result, expected)
|
| 390 |
+
|
| 391 |
+
# datetime w/ multi
|
| 392 |
+
result = df.quantile([0.5], numeric_only=False)
|
| 393 |
+
expected = DataFrame(
|
| 394 |
+
[[Timestamp("2010-07-02 12:00:00"), 2.5]], index=[0.5], columns=["a", "b"]
|
| 395 |
+
)
|
| 396 |
+
tm.assert_frame_equal(result, expected)
|
| 397 |
+
|
| 398 |
+
# axis = 1
|
| 399 |
+
df["c"] = pd.to_datetime(["2011", "2012"])
|
| 400 |
+
result = df[["a", "c"]].quantile(0.5, axis=1, numeric_only=False)
|
| 401 |
+
expected = Series(
|
| 402 |
+
[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")],
|
| 403 |
+
index=[0, 1],
|
| 404 |
+
name=0.5,
|
| 405 |
+
)
|
| 406 |
+
tm.assert_series_equal(result, expected)
|
| 407 |
+
|
| 408 |
+
result = df[["a", "c"]].quantile([0.5], axis=1, numeric_only=False)
|
| 409 |
+
expected = DataFrame(
|
| 410 |
+
[[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")]],
|
| 411 |
+
index=[0.5],
|
| 412 |
+
columns=[0, 1],
|
| 413 |
+
)
|
| 414 |
+
tm.assert_frame_equal(result, expected)
|
| 415 |
+
|
| 416 |
+
# empty when numeric_only=True
|
| 417 |
+
result = df[["a", "c"]].quantile(0.5, numeric_only=True)
|
| 418 |
+
expected = Series([], index=[], dtype=np.float64, name=0.5)
|
| 419 |
+
tm.assert_series_equal(result, expected)
|
| 420 |
+
|
| 421 |
+
result = df[["a", "c"]].quantile([0.5], numeric_only=True)
|
| 422 |
+
expected = DataFrame(index=[0.5], columns=[])
|
| 423 |
+
tm.assert_frame_equal(result, expected)
|
| 424 |
+
|
| 425 |
+
@pytest.mark.parametrize(
|
| 426 |
+
"dtype",
|
| 427 |
+
[
|
| 428 |
+
"datetime64[ns]",
|
| 429 |
+
"datetime64[ns, US/Pacific]",
|
| 430 |
+
"timedelta64[ns]",
|
| 431 |
+
"Period[D]",
|
| 432 |
+
],
|
| 433 |
+
)
|
| 434 |
+
def test_quantile_dt64_empty(self, dtype, interp_method):
|
| 435 |
+
# GH#41544
|
| 436 |
+
interpolation, method = interp_method
|
| 437 |
+
df = DataFrame(columns=["a", "b"], dtype=dtype)
|
| 438 |
+
|
| 439 |
+
res = df.quantile(
|
| 440 |
+
0.5, axis=1, numeric_only=False, interpolation=interpolation, method=method
|
| 441 |
+
)
|
| 442 |
+
expected = Series([], index=[], name=0.5, dtype=dtype)
|
| 443 |
+
tm.assert_series_equal(res, expected)
|
| 444 |
+
|
| 445 |
+
# no columns in result, so no dtype preservation
|
| 446 |
+
res = df.quantile(
|
| 447 |
+
[0.5],
|
| 448 |
+
axis=1,
|
| 449 |
+
numeric_only=False,
|
| 450 |
+
interpolation=interpolation,
|
| 451 |
+
method=method,
|
| 452 |
+
)
|
| 453 |
+
expected = DataFrame(index=[0.5], columns=[])
|
| 454 |
+
tm.assert_frame_equal(res, expected)
|
| 455 |
+
|
| 456 |
+
@pytest.mark.parametrize("invalid", [-1, 2, [0.5, -1], [0.5, 2]])
|
| 457 |
+
def test_quantile_invalid(self, invalid, datetime_frame, interp_method):
|
| 458 |
+
msg = "percentiles should all be in the interval \\[0, 1\\]"
|
| 459 |
+
interpolation, method = interp_method
|
| 460 |
+
with pytest.raises(ValueError, match=msg):
|
| 461 |
+
datetime_frame.quantile(invalid, interpolation=interpolation, method=method)
|
| 462 |
+
|
| 463 |
+
def test_quantile_box(self, interp_method, request, using_array_manager):
|
| 464 |
+
interpolation, method = interp_method
|
| 465 |
+
if method == "table" and using_array_manager:
|
| 466 |
+
request.node.add_marker(
|
| 467 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 468 |
+
)
|
| 469 |
+
df = DataFrame(
|
| 470 |
+
{
|
| 471 |
+
"A": [
|
| 472 |
+
Timestamp("2011-01-01"),
|
| 473 |
+
Timestamp("2011-01-02"),
|
| 474 |
+
Timestamp("2011-01-03"),
|
| 475 |
+
],
|
| 476 |
+
"B": [
|
| 477 |
+
Timestamp("2011-01-01", tz="US/Eastern"),
|
| 478 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 479 |
+
Timestamp("2011-01-03", tz="US/Eastern"),
|
| 480 |
+
],
|
| 481 |
+
"C": [
|
| 482 |
+
pd.Timedelta("1 days"),
|
| 483 |
+
pd.Timedelta("2 days"),
|
| 484 |
+
pd.Timedelta("3 days"),
|
| 485 |
+
],
|
| 486 |
+
}
|
| 487 |
+
)
|
| 488 |
+
|
| 489 |
+
res = df.quantile(
|
| 490 |
+
0.5, numeric_only=False, interpolation=interpolation, method=method
|
| 491 |
+
)
|
| 492 |
+
|
| 493 |
+
exp = Series(
|
| 494 |
+
[
|
| 495 |
+
Timestamp("2011-01-02"),
|
| 496 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 497 |
+
pd.Timedelta("2 days"),
|
| 498 |
+
],
|
| 499 |
+
name=0.5,
|
| 500 |
+
index=["A", "B", "C"],
|
| 501 |
+
)
|
| 502 |
+
tm.assert_series_equal(res, exp)
|
| 503 |
+
|
| 504 |
+
res = df.quantile(
|
| 505 |
+
[0.5], numeric_only=False, interpolation=interpolation, method=method
|
| 506 |
+
)
|
| 507 |
+
exp = DataFrame(
|
| 508 |
+
[
|
| 509 |
+
[
|
| 510 |
+
Timestamp("2011-01-02"),
|
| 511 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 512 |
+
pd.Timedelta("2 days"),
|
| 513 |
+
]
|
| 514 |
+
],
|
| 515 |
+
index=[0.5],
|
| 516 |
+
columns=["A", "B", "C"],
|
| 517 |
+
)
|
| 518 |
+
tm.assert_frame_equal(res, exp)
|
| 519 |
+
|
| 520 |
+
def test_quantile_box_nat(self):
|
| 521 |
+
# DatetimeLikeBlock may be consolidated and contain NaT in different loc
|
| 522 |
+
df = DataFrame(
|
| 523 |
+
{
|
| 524 |
+
"A": [
|
| 525 |
+
Timestamp("2011-01-01"),
|
| 526 |
+
pd.NaT,
|
| 527 |
+
Timestamp("2011-01-02"),
|
| 528 |
+
Timestamp("2011-01-03"),
|
| 529 |
+
],
|
| 530 |
+
"a": [
|
| 531 |
+
Timestamp("2011-01-01"),
|
| 532 |
+
Timestamp("2011-01-02"),
|
| 533 |
+
pd.NaT,
|
| 534 |
+
Timestamp("2011-01-03"),
|
| 535 |
+
],
|
| 536 |
+
"B": [
|
| 537 |
+
Timestamp("2011-01-01", tz="US/Eastern"),
|
| 538 |
+
pd.NaT,
|
| 539 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 540 |
+
Timestamp("2011-01-03", tz="US/Eastern"),
|
| 541 |
+
],
|
| 542 |
+
"b": [
|
| 543 |
+
Timestamp("2011-01-01", tz="US/Eastern"),
|
| 544 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 545 |
+
pd.NaT,
|
| 546 |
+
Timestamp("2011-01-03", tz="US/Eastern"),
|
| 547 |
+
],
|
| 548 |
+
"C": [
|
| 549 |
+
pd.Timedelta("1 days"),
|
| 550 |
+
pd.Timedelta("2 days"),
|
| 551 |
+
pd.Timedelta("3 days"),
|
| 552 |
+
pd.NaT,
|
| 553 |
+
],
|
| 554 |
+
"c": [
|
| 555 |
+
pd.NaT,
|
| 556 |
+
pd.Timedelta("1 days"),
|
| 557 |
+
pd.Timedelta("2 days"),
|
| 558 |
+
pd.Timedelta("3 days"),
|
| 559 |
+
],
|
| 560 |
+
},
|
| 561 |
+
columns=list("AaBbCc"),
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
res = df.quantile(0.5, numeric_only=False)
|
| 565 |
+
exp = Series(
|
| 566 |
+
[
|
| 567 |
+
Timestamp("2011-01-02"),
|
| 568 |
+
Timestamp("2011-01-02"),
|
| 569 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 570 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 571 |
+
pd.Timedelta("2 days"),
|
| 572 |
+
pd.Timedelta("2 days"),
|
| 573 |
+
],
|
| 574 |
+
name=0.5,
|
| 575 |
+
index=list("AaBbCc"),
|
| 576 |
+
)
|
| 577 |
+
tm.assert_series_equal(res, exp)
|
| 578 |
+
|
| 579 |
+
res = df.quantile([0.5], numeric_only=False)
|
| 580 |
+
exp = DataFrame(
|
| 581 |
+
[
|
| 582 |
+
[
|
| 583 |
+
Timestamp("2011-01-02"),
|
| 584 |
+
Timestamp("2011-01-02"),
|
| 585 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 586 |
+
Timestamp("2011-01-02", tz="US/Eastern"),
|
| 587 |
+
pd.Timedelta("2 days"),
|
| 588 |
+
pd.Timedelta("2 days"),
|
| 589 |
+
]
|
| 590 |
+
],
|
| 591 |
+
index=[0.5],
|
| 592 |
+
columns=list("AaBbCc"),
|
| 593 |
+
)
|
| 594 |
+
tm.assert_frame_equal(res, exp)
|
| 595 |
+
|
| 596 |
+
def test_quantile_nan(self, interp_method, request, using_array_manager):
|
| 597 |
+
interpolation, method = interp_method
|
| 598 |
+
if method == "table" and using_array_manager:
|
| 599 |
+
request.node.add_marker(
|
| 600 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 601 |
+
)
|
| 602 |
+
# GH 14357 - float block where some cols have missing values
|
| 603 |
+
df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)})
|
| 604 |
+
df.iloc[-1, 1] = np.nan
|
| 605 |
+
|
| 606 |
+
res = df.quantile(0.5, interpolation=interpolation, method=method)
|
| 607 |
+
exp = Series(
|
| 608 |
+
[3.0, 2.5 if interpolation == "linear" else 3.0], index=["a", "b"], name=0.5
|
| 609 |
+
)
|
| 610 |
+
tm.assert_series_equal(res, exp)
|
| 611 |
+
|
| 612 |
+
res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method)
|
| 613 |
+
exp = DataFrame(
|
| 614 |
+
{
|
| 615 |
+
"a": [3.0, 4.0],
|
| 616 |
+
"b": [2.5, 3.25] if interpolation == "linear" else [3.0, 4.0],
|
| 617 |
+
},
|
| 618 |
+
index=[0.5, 0.75],
|
| 619 |
+
)
|
| 620 |
+
tm.assert_frame_equal(res, exp)
|
| 621 |
+
|
| 622 |
+
res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
|
| 623 |
+
exp = Series(np.arange(1.0, 6.0), name=0.5)
|
| 624 |
+
tm.assert_series_equal(res, exp)
|
| 625 |
+
|
| 626 |
+
res = df.quantile(
|
| 627 |
+
[0.5, 0.75], axis=1, interpolation=interpolation, method=method
|
| 628 |
+
)
|
| 629 |
+
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
|
| 630 |
+
if interpolation == "nearest":
|
| 631 |
+
exp.iloc[1, -1] = np.nan
|
| 632 |
+
tm.assert_frame_equal(res, exp)
|
| 633 |
+
|
| 634 |
+
# full-nan column
|
| 635 |
+
df["b"] = np.nan
|
| 636 |
+
|
| 637 |
+
res = df.quantile(0.5, interpolation=interpolation, method=method)
|
| 638 |
+
exp = Series([3.0, np.nan], index=["a", "b"], name=0.5)
|
| 639 |
+
tm.assert_series_equal(res, exp)
|
| 640 |
+
|
| 641 |
+
res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method)
|
| 642 |
+
exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75])
|
| 643 |
+
tm.assert_frame_equal(res, exp)
|
| 644 |
+
|
| 645 |
+
def test_quantile_nat(self, interp_method, request, using_array_manager):
|
| 646 |
+
interpolation, method = interp_method
|
| 647 |
+
if method == "table" and using_array_manager:
|
| 648 |
+
request.node.add_marker(
|
| 649 |
+
pytest.mark.xfail(reason="Axis name incorrectly set.")
|
| 650 |
+
)
|
| 651 |
+
# full NaT column
|
| 652 |
+
df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]})
|
| 653 |
+
|
| 654 |
+
res = df.quantile(
|
| 655 |
+
0.5, numeric_only=False, interpolation=interpolation, method=method
|
| 656 |
+
)
|
| 657 |
+
exp = Series([pd.NaT], index=["a"], name=0.5)
|
| 658 |
+
tm.assert_series_equal(res, exp)
|
| 659 |
+
|
| 660 |
+
res = df.quantile(
|
| 661 |
+
[0.5], numeric_only=False, interpolation=interpolation, method=method
|
| 662 |
+
)
|
| 663 |
+
exp = DataFrame({"a": [pd.NaT]}, index=[0.5])
|
| 664 |
+
tm.assert_frame_equal(res, exp)
|
| 665 |
+
|
| 666 |
+
# mixed non-null / full null column
|
| 667 |
+
df = DataFrame(
|
| 668 |
+
{
|
| 669 |
+
"a": [
|
| 670 |
+
Timestamp("2012-01-01"),
|
| 671 |
+
Timestamp("2012-01-02"),
|
| 672 |
+
Timestamp("2012-01-03"),
|
| 673 |
+
],
|
| 674 |
+
"b": [pd.NaT, pd.NaT, pd.NaT],
|
| 675 |
+
}
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
res = df.quantile(
|
| 679 |
+
0.5, numeric_only=False, interpolation=interpolation, method=method
|
| 680 |
+
)
|
| 681 |
+
exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5)
|
| 682 |
+
tm.assert_series_equal(res, exp)
|
| 683 |
+
|
| 684 |
+
res = df.quantile(
|
| 685 |
+
[0.5], numeric_only=False, interpolation=interpolation, method=method
|
| 686 |
+
)
|
| 687 |
+
exp = DataFrame(
|
| 688 |
+
[[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"]
|
| 689 |
+
)
|
| 690 |
+
tm.assert_frame_equal(res, exp)
|
| 691 |
+
|
| 692 |
+
def test_quantile_empty_no_rows_floats(self, interp_method):
|
| 693 |
+
interpolation, method = interp_method
|
| 694 |
+
|
| 695 |
+
df = DataFrame(columns=["a", "b"], dtype="float64")
|
| 696 |
+
|
| 697 |
+
res = df.quantile(0.5, interpolation=interpolation, method=method)
|
| 698 |
+
exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5)
|
| 699 |
+
tm.assert_series_equal(res, exp)
|
| 700 |
+
|
| 701 |
+
res = df.quantile([0.5], interpolation=interpolation, method=method)
|
| 702 |
+
exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5])
|
| 703 |
+
tm.assert_frame_equal(res, exp)
|
| 704 |
+
|
| 705 |
+
res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
|
| 706 |
+
exp = Series([], index=[], dtype="float64", name=0.5)
|
| 707 |
+
tm.assert_series_equal(res, exp)
|
| 708 |
+
|
| 709 |
+
res = df.quantile([0.5], axis=1, interpolation=interpolation, method=method)
|
| 710 |
+
exp = DataFrame(columns=[], index=[0.5])
|
| 711 |
+
tm.assert_frame_equal(res, exp)
|
| 712 |
+
|
| 713 |
+
def test_quantile_empty_no_rows_ints(self, interp_method):
|
| 714 |
+
interpolation, method = interp_method
|
| 715 |
+
df = DataFrame(columns=["a", "b"], dtype="int64")
|
| 716 |
+
|
| 717 |
+
res = df.quantile(0.5, interpolation=interpolation, method=method)
|
| 718 |
+
exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5)
|
| 719 |
+
tm.assert_series_equal(res, exp)
|
| 720 |
+
|
| 721 |
+
def test_quantile_empty_no_rows_dt64(self, interp_method):
|
| 722 |
+
interpolation, method = interp_method
|
| 723 |
+
# datetimes
|
| 724 |
+
df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]")
|
| 725 |
+
|
| 726 |
+
res = df.quantile(
|
| 727 |
+
0.5, numeric_only=False, interpolation=interpolation, method=method
|
| 728 |
+
)
|
| 729 |
+
exp = Series(
|
| 730 |
+
[pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5
|
| 731 |
+
)
|
| 732 |
+
tm.assert_series_equal(res, exp)
|
| 733 |
+
|
| 734 |
+
# Mixed dt64/dt64tz
|
| 735 |
+
df["a"] = df["a"].dt.tz_localize("US/Central")
|
| 736 |
+
res = df.quantile(
|
| 737 |
+
0.5, numeric_only=False, interpolation=interpolation, method=method
|
| 738 |
+
)
|
| 739 |
+
exp = exp.astype(object)
|
| 740 |
+
tm.assert_series_equal(res, exp)
|
| 741 |
+
|
| 742 |
+
# both dt64tz
|
| 743 |
+
df["b"] = df["b"].dt.tz_localize("US/Central")
|
| 744 |
+
res = df.quantile(
|
| 745 |
+
0.5, numeric_only=False, interpolation=interpolation, method=method
|
| 746 |
+
)
|
| 747 |
+
exp = exp.astype(df["b"].dtype)
|
| 748 |
+
tm.assert_series_equal(res, exp)
|
| 749 |
+
|
| 750 |
+
def test_quantile_empty_no_columns(self, interp_method):
|
| 751 |
+
# GH#23925 _get_numeric_data may drop all columns
|
| 752 |
+
interpolation, method = interp_method
|
| 753 |
+
df = DataFrame(pd.date_range("1/1/18", periods=5))
|
| 754 |
+
df.columns.name = "captain tightpants"
|
| 755 |
+
result = df.quantile(
|
| 756 |
+
0.5, numeric_only=True, interpolation=interpolation, method=method
|
| 757 |
+
)
|
| 758 |
+
expected = Series([], index=[], name=0.5, dtype=np.float64)
|
| 759 |
+
expected.index.name = "captain tightpants"
|
| 760 |
+
tm.assert_series_equal(result, expected)
|
| 761 |
+
|
| 762 |
+
result = df.quantile(
|
| 763 |
+
[0.5], numeric_only=True, interpolation=interpolation, method=method
|
| 764 |
+
)
|
| 765 |
+
expected = DataFrame([], index=[0.5], columns=[])
|
| 766 |
+
expected.columns.name = "captain tightpants"
|
| 767 |
+
tm.assert_frame_equal(result, expected)
|
| 768 |
+
|
| 769 |
+
def test_quantile_item_cache(
|
| 770 |
+
self, using_array_manager, interp_method, using_copy_on_write
|
| 771 |
+
):
|
| 772 |
+
# previous behavior incorrect retained an invalid _item_cache entry
|
| 773 |
+
interpolation, method = interp_method
|
| 774 |
+
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
|
| 775 |
+
df["D"] = df["A"] * 2
|
| 776 |
+
ser = df["A"]
|
| 777 |
+
if not using_array_manager:
|
| 778 |
+
assert len(df._mgr.blocks) == 2
|
| 779 |
+
|
| 780 |
+
df.quantile(numeric_only=False, interpolation=interpolation, method=method)
|
| 781 |
+
|
| 782 |
+
if using_copy_on_write:
|
| 783 |
+
ser.iloc[0] = 99
|
| 784 |
+
assert df.iloc[0, 0] == df["A"][0]
|
| 785 |
+
assert df.iloc[0, 0] != 99
|
| 786 |
+
else:
|
| 787 |
+
ser.values[0] = 99
|
| 788 |
+
assert df.iloc[0, 0] == df["A"][0]
|
| 789 |
+
assert df.iloc[0, 0] == 99
|
| 790 |
+
|
| 791 |
+
def test_invalid_method(self):
|
| 792 |
+
with pytest.raises(ValueError, match="Invalid method: foo"):
|
| 793 |
+
DataFrame(range(1)).quantile(0.5, method="foo")
|
| 794 |
+
|
| 795 |
+
def test_table_invalid_interpolation(self):
|
| 796 |
+
with pytest.raises(ValueError, match="Invalid interpolation: foo"):
|
| 797 |
+
DataFrame(range(1)).quantile(0.5, method="table", interpolation="foo")
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
class TestQuantileExtensionDtype:
|
| 801 |
+
# TODO: tests for axis=1?
|
| 802 |
+
# TODO: empty case?
|
| 803 |
+
|
| 804 |
+
@pytest.fixture(
|
| 805 |
+
params=[
|
| 806 |
+
pytest.param(
|
| 807 |
+
pd.IntervalIndex.from_breaks(range(10)),
|
| 808 |
+
marks=pytest.mark.xfail(reason="raises when trying to add Intervals"),
|
| 809 |
+
),
|
| 810 |
+
pd.period_range("2016-01-01", periods=9, freq="D"),
|
| 811 |
+
pd.date_range("2016-01-01", periods=9, tz="US/Pacific"),
|
| 812 |
+
pd.timedelta_range("1 Day", periods=9),
|
| 813 |
+
pd.array(np.arange(9), dtype="Int64"),
|
| 814 |
+
pd.array(np.arange(9), dtype="Float64"),
|
| 815 |
+
],
|
| 816 |
+
ids=lambda x: str(x.dtype),
|
| 817 |
+
)
|
| 818 |
+
def index(self, request):
|
| 819 |
+
# NB: not actually an Index object
|
| 820 |
+
idx = request.param
|
| 821 |
+
idx.name = "A"
|
| 822 |
+
return idx
|
| 823 |
+
|
| 824 |
+
@pytest.fixture
|
| 825 |
+
def obj(self, index, frame_or_series):
|
| 826 |
+
# bc index is not always an Index (yet), we need to re-patch .name
|
| 827 |
+
obj = frame_or_series(index).copy()
|
| 828 |
+
|
| 829 |
+
if frame_or_series is Series:
|
| 830 |
+
obj.name = "A"
|
| 831 |
+
else:
|
| 832 |
+
obj.columns = ["A"]
|
| 833 |
+
return obj
|
| 834 |
+
|
| 835 |
+
def compute_quantile(self, obj, qs):
|
| 836 |
+
if isinstance(obj, Series):
|
| 837 |
+
result = obj.quantile(qs)
|
| 838 |
+
else:
|
| 839 |
+
result = obj.quantile(qs, numeric_only=False)
|
| 840 |
+
return result
|
| 841 |
+
|
| 842 |
+
def test_quantile_ea(self, request, obj, index):
|
| 843 |
+
# result should be invariant to shuffling
|
| 844 |
+
indexer = np.arange(len(index), dtype=np.intp)
|
| 845 |
+
np.random.shuffle(indexer)
|
| 846 |
+
obj = obj.iloc[indexer]
|
| 847 |
+
|
| 848 |
+
qs = [0.5, 0, 1]
|
| 849 |
+
result = self.compute_quantile(obj, qs)
|
| 850 |
+
|
| 851 |
+
if np_version_under1p21 and index.dtype == "timedelta64[ns]":
|
| 852 |
+
msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
|
| 853 |
+
mark = pytest.mark.xfail(reason=msg, raises=TypeError)
|
| 854 |
+
request.node.add_marker(mark)
|
| 855 |
+
|
| 856 |
+
exp_dtype = index.dtype
|
| 857 |
+
if index.dtype == "Int64":
|
| 858 |
+
# match non-nullable casting behavior
|
| 859 |
+
exp_dtype = "Float64"
|
| 860 |
+
|
| 861 |
+
# expected here assumes len(index) == 9
|
| 862 |
+
expected = Series(
|
| 863 |
+
[index[4], index[0], index[-1]], dtype=exp_dtype, index=qs, name="A"
|
| 864 |
+
)
|
| 865 |
+
expected = type(obj)(expected)
|
| 866 |
+
|
| 867 |
+
tm.assert_equal(result, expected)
|
| 868 |
+
|
| 869 |
+
def test_quantile_ea_with_na(self, obj, index):
|
| 870 |
+
obj.iloc[0] = index._na_value
|
| 871 |
+
obj.iloc[-1] = index._na_value
|
| 872 |
+
|
| 873 |
+
# result should be invariant to shuffling
|
| 874 |
+
indexer = np.arange(len(index), dtype=np.intp)
|
| 875 |
+
np.random.shuffle(indexer)
|
| 876 |
+
obj = obj.iloc[indexer]
|
| 877 |
+
|
| 878 |
+
qs = [0.5, 0, 1]
|
| 879 |
+
result = self.compute_quantile(obj, qs)
|
| 880 |
+
|
| 881 |
+
# expected here assumes len(index) == 9
|
| 882 |
+
expected = Series(
|
| 883 |
+
[index[4], index[1], index[-2]], dtype=index.dtype, index=qs, name="A"
|
| 884 |
+
)
|
| 885 |
+
expected = type(obj)(expected)
|
| 886 |
+
tm.assert_equal(result, expected)
|
| 887 |
+
|
| 888 |
+
def test_quantile_ea_all_na(self, request, obj, index):
|
| 889 |
+
obj.iloc[:] = index._na_value
|
| 890 |
+
# Check dtypes were preserved; this was once a problem see GH#39763
|
| 891 |
+
assert np.all(obj.dtypes == index.dtype)
|
| 892 |
+
|
| 893 |
+
# result should be invariant to shuffling
|
| 894 |
+
indexer = np.arange(len(index), dtype=np.intp)
|
| 895 |
+
np.random.shuffle(indexer)
|
| 896 |
+
obj = obj.iloc[indexer]
|
| 897 |
+
|
| 898 |
+
qs = [0.5, 0, 1]
|
| 899 |
+
result = self.compute_quantile(obj, qs)
|
| 900 |
+
|
| 901 |
+
expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value)
|
| 902 |
+
expected = Series(expected, index=qs, name="A")
|
| 903 |
+
expected = type(obj)(expected)
|
| 904 |
+
tm.assert_equal(result, expected)
|
| 905 |
+
|
| 906 |
+
def test_quantile_ea_scalar(self, request, obj, index):
|
| 907 |
+
# scalar qs
|
| 908 |
+
|
| 909 |
+
# result should be invariant to shuffling
|
| 910 |
+
indexer = np.arange(len(index), dtype=np.intp)
|
| 911 |
+
np.random.shuffle(indexer)
|
| 912 |
+
obj = obj.iloc[indexer]
|
| 913 |
+
|
| 914 |
+
qs = 0.5
|
| 915 |
+
result = self.compute_quantile(obj, qs)
|
| 916 |
+
|
| 917 |
+
if np_version_under1p21 and index.dtype == "timedelta64[ns]":
|
| 918 |
+
msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
|
| 919 |
+
mark = pytest.mark.xfail(reason=msg, raises=TypeError)
|
| 920 |
+
request.node.add_marker(mark)
|
| 921 |
+
|
| 922 |
+
exp_dtype = index.dtype
|
| 923 |
+
if index.dtype == "Int64":
|
| 924 |
+
exp_dtype = "Float64"
|
| 925 |
+
|
| 926 |
+
expected = Series({"A": index[4]}, dtype=exp_dtype, name=0.5)
|
| 927 |
+
if isinstance(obj, Series):
|
| 928 |
+
expected = expected["A"]
|
| 929 |
+
assert result == expected
|
| 930 |
+
else:
|
| 931 |
+
tm.assert_series_equal(result, expected)
|
| 932 |
+
|
| 933 |
+
@pytest.mark.parametrize(
|
| 934 |
+
"dtype, expected_data, expected_index, axis",
|
| 935 |
+
[
|
| 936 |
+
["float64", [], [], 1],
|
| 937 |
+
["int64", [], [], 1],
|
| 938 |
+
["float64", [np.nan, np.nan], ["a", "b"], 0],
|
| 939 |
+
["int64", [np.nan, np.nan], ["a", "b"], 0],
|
| 940 |
+
],
|
| 941 |
+
)
|
| 942 |
+
def test_empty_numeric(self, dtype, expected_data, expected_index, axis):
|
| 943 |
+
# GH 14564
|
| 944 |
+
df = DataFrame(columns=["a", "b"], dtype=dtype)
|
| 945 |
+
result = df.quantile(0.5, axis=axis)
|
| 946 |
+
expected = Series(
|
| 947 |
+
expected_data, name=0.5, index=Index(expected_index), dtype="float64"
|
| 948 |
+
)
|
| 949 |
+
tm.assert_series_equal(result, expected)
|
| 950 |
+
|
| 951 |
+
@pytest.mark.parametrize(
|
| 952 |
+
"dtype, expected_data, expected_index, axis, expected_dtype",
|
| 953 |
+
[
|
| 954 |
+
["datetime64[ns]", [], [], 1, "datetime64[ns]"],
|
| 955 |
+
["datetime64[ns]", [pd.NaT, pd.NaT], ["a", "b"], 0, "datetime64[ns]"],
|
| 956 |
+
],
|
| 957 |
+
)
|
| 958 |
+
def test_empty_datelike(
|
| 959 |
+
self, dtype, expected_data, expected_index, axis, expected_dtype
|
| 960 |
+
):
|
| 961 |
+
# GH 14564
|
| 962 |
+
df = DataFrame(columns=["a", "b"], dtype=dtype)
|
| 963 |
+
result = df.quantile(0.5, axis=axis, numeric_only=False)
|
| 964 |
+
expected = Series(
|
| 965 |
+
expected_data, name=0.5, index=Index(expected_index), dtype=expected_dtype
|
| 966 |
+
)
|
| 967 |
+
tm.assert_series_equal(result, expected)
|
| 968 |
+
|
| 969 |
+
@pytest.mark.parametrize(
|
| 970 |
+
"expected_data, expected_index, axis",
|
| 971 |
+
[
|
| 972 |
+
[[np.nan, np.nan], range(2), 1],
|
| 973 |
+
[[], [], 0],
|
| 974 |
+
],
|
| 975 |
+
)
|
| 976 |
+
def test_datelike_numeric_only(self, expected_data, expected_index, axis):
|
| 977 |
+
# GH 14564
|
| 978 |
+
df = DataFrame(
|
| 979 |
+
{
|
| 980 |
+
"a": pd.to_datetime(["2010", "2011"]),
|
| 981 |
+
"b": [0, 5],
|
| 982 |
+
"c": pd.to_datetime(["2011", "2012"]),
|
| 983 |
+
}
|
| 984 |
+
)
|
| 985 |
+
result = df[["a", "c"]].quantile(0.5, axis=axis, numeric_only=True)
|
| 986 |
+
expected = Series(
|
| 987 |
+
expected_data, name=0.5, index=Index(expected_index), dtype=np.float64
|
| 988 |
+
)
|
| 989 |
+
tm.assert_series_equal(result, expected)
|
videochat2/lib/python3.10/site-packages/pandas/tests/frame/methods/test_rank.py
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import (
|
| 2 |
+
datetime,
|
| 3 |
+
timedelta,
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
from pandas._libs.algos import (
|
| 10 |
+
Infinity,
|
| 11 |
+
NegInfinity,
|
| 12 |
+
)
|
| 13 |
+
import pandas.util._test_decorators as td
|
| 14 |
+
|
| 15 |
+
from pandas import (
|
| 16 |
+
DataFrame,
|
| 17 |
+
Series,
|
| 18 |
+
)
|
| 19 |
+
import pandas._testing as tm
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class TestRank:
|
| 23 |
+
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
|
| 24 |
+
df = DataFrame({"A": s, "B": s})
|
| 25 |
+
|
| 26 |
+
results = {
|
| 27 |
+
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
|
| 28 |
+
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
|
| 29 |
+
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
|
| 30 |
+
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
|
| 31 |
+
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
@pytest.fixture(params=["average", "min", "max", "first", "dense"])
|
| 35 |
+
def method(self, request):
|
| 36 |
+
"""
|
| 37 |
+
Fixture for trying all rank methods
|
| 38 |
+
"""
|
| 39 |
+
return request.param
|
| 40 |
+
|
| 41 |
+
@td.skip_if_no_scipy
|
| 42 |
+
def test_rank(self, float_frame):
|
| 43 |
+
import scipy.stats # noqa:F401
|
| 44 |
+
from scipy.stats import rankdata
|
| 45 |
+
|
| 46 |
+
float_frame.loc[::2, "A"] = np.nan
|
| 47 |
+
float_frame.loc[::3, "B"] = np.nan
|
| 48 |
+
float_frame.loc[::4, "C"] = np.nan
|
| 49 |
+
float_frame.loc[::5, "D"] = np.nan
|
| 50 |
+
|
| 51 |
+
ranks0 = float_frame.rank()
|
| 52 |
+
ranks1 = float_frame.rank(1)
|
| 53 |
+
mask = np.isnan(float_frame.values)
|
| 54 |
+
|
| 55 |
+
fvals = float_frame.fillna(np.inf).values
|
| 56 |
+
|
| 57 |
+
exp0 = np.apply_along_axis(rankdata, 0, fvals)
|
| 58 |
+
exp0[mask] = np.nan
|
| 59 |
+
|
| 60 |
+
exp1 = np.apply_along_axis(rankdata, 1, fvals)
|
| 61 |
+
exp1[mask] = np.nan
|
| 62 |
+
|
| 63 |
+
tm.assert_almost_equal(ranks0.values, exp0)
|
| 64 |
+
tm.assert_almost_equal(ranks1.values, exp1)
|
| 65 |
+
|
| 66 |
+
# integers
|
| 67 |
+
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
|
| 68 |
+
|
| 69 |
+
result = df.rank()
|
| 70 |
+
exp = df.astype(float).rank()
|
| 71 |
+
tm.assert_frame_equal(result, exp)
|
| 72 |
+
|
| 73 |
+
result = df.rank(1)
|
| 74 |
+
exp = df.astype(float).rank(1)
|
| 75 |
+
tm.assert_frame_equal(result, exp)
|
| 76 |
+
|
| 77 |
+
def test_rank2(self):
|
| 78 |
+
df = DataFrame([[1, 3, 2], [1, 2, 3]])
|
| 79 |
+
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
|
| 80 |
+
result = df.rank(1, pct=True)
|
| 81 |
+
tm.assert_frame_equal(result, expected)
|
| 82 |
+
|
| 83 |
+
df = DataFrame([[1, 3, 2], [1, 2, 3]])
|
| 84 |
+
expected = df.rank(0) / 2.0
|
| 85 |
+
result = df.rank(0, pct=True)
|
| 86 |
+
tm.assert_frame_equal(result, expected)
|
| 87 |
+
|
| 88 |
+
df = DataFrame([["b", "c", "a"], ["a", "c", "b"]])
|
| 89 |
+
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
|
| 90 |
+
result = df.rank(1, numeric_only=False)
|
| 91 |
+
tm.assert_frame_equal(result, expected)
|
| 92 |
+
|
| 93 |
+
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
|
| 94 |
+
result = df.rank(0, numeric_only=False)
|
| 95 |
+
tm.assert_frame_equal(result, expected)
|
| 96 |
+
|
| 97 |
+
df = DataFrame([["b", np.nan, "a"], ["a", "c", "b"]])
|
| 98 |
+
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]])
|
| 99 |
+
result = df.rank(1, numeric_only=False)
|
| 100 |
+
tm.assert_frame_equal(result, expected)
|
| 101 |
+
|
| 102 |
+
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]])
|
| 103 |
+
result = df.rank(0, numeric_only=False)
|
| 104 |
+
tm.assert_frame_equal(result, expected)
|
| 105 |
+
|
| 106 |
+
# f7u12, this does not work without extensive workaround
|
| 107 |
+
data = [
|
| 108 |
+
[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
|
| 109 |
+
[datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)],
|
| 110 |
+
]
|
| 111 |
+
df = DataFrame(data)
|
| 112 |
+
|
| 113 |
+
# check the rank
|
| 114 |
+
expected = DataFrame([[2.0, np.nan, 1.0], [2.0, 3.0, 1.0]])
|
| 115 |
+
result = df.rank(1, numeric_only=False, ascending=True)
|
| 116 |
+
tm.assert_frame_equal(result, expected)
|
| 117 |
+
|
| 118 |
+
expected = DataFrame([[1.0, np.nan, 2.0], [2.0, 1.0, 3.0]])
|
| 119 |
+
result = df.rank(1, numeric_only=False, ascending=False)
|
| 120 |
+
tm.assert_frame_equal(result, expected)
|
| 121 |
+
|
| 122 |
+
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]})
|
| 123 |
+
exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]})
|
| 124 |
+
tm.assert_frame_equal(df.rank(), exp)
|
| 125 |
+
|
| 126 |
+
def test_rank_does_not_mutate(self):
|
| 127 |
+
# GH#18521
|
| 128 |
+
# Check rank does not mutate DataFrame
|
| 129 |
+
df = DataFrame(np.random.randn(10, 3), dtype="float64")
|
| 130 |
+
expected = df.copy()
|
| 131 |
+
df.rank()
|
| 132 |
+
result = df
|
| 133 |
+
tm.assert_frame_equal(result, expected)
|
| 134 |
+
|
| 135 |
+
def test_rank_mixed_frame(self, float_string_frame):
|
| 136 |
+
float_string_frame["datetime"] = datetime.now()
|
| 137 |
+
float_string_frame["timedelta"] = timedelta(days=1, seconds=1)
|
| 138 |
+
|
| 139 |
+
float_string_frame.rank(numeric_only=False)
|
| 140 |
+
with pytest.raises(TypeError, match="not supported between instances of"):
|
| 141 |
+
float_string_frame.rank(axis=1)
|
| 142 |
+
|
| 143 |
+
@td.skip_if_no_scipy
|
| 144 |
+
def test_rank_na_option(self, float_frame):
|
| 145 |
+
import scipy.stats # noqa:F401
|
| 146 |
+
from scipy.stats import rankdata
|
| 147 |
+
|
| 148 |
+
float_frame.loc[::2, "A"] = np.nan
|
| 149 |
+
float_frame.loc[::3, "B"] = np.nan
|
| 150 |
+
float_frame.loc[::4, "C"] = np.nan
|
| 151 |
+
float_frame.loc[::5, "D"] = np.nan
|
| 152 |
+
|
| 153 |
+
# bottom
|
| 154 |
+
ranks0 = float_frame.rank(na_option="bottom")
|
| 155 |
+
ranks1 = float_frame.rank(1, na_option="bottom")
|
| 156 |
+
|
| 157 |
+
fvals = float_frame.fillna(np.inf).values
|
| 158 |
+
|
| 159 |
+
exp0 = np.apply_along_axis(rankdata, 0, fvals)
|
| 160 |
+
exp1 = np.apply_along_axis(rankdata, 1, fvals)
|
| 161 |
+
|
| 162 |
+
tm.assert_almost_equal(ranks0.values, exp0)
|
| 163 |
+
tm.assert_almost_equal(ranks1.values, exp1)
|
| 164 |
+
|
| 165 |
+
# top
|
| 166 |
+
ranks0 = float_frame.rank(na_option="top")
|
| 167 |
+
ranks1 = float_frame.rank(1, na_option="top")
|
| 168 |
+
|
| 169 |
+
fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
|
| 170 |
+
fval1 = float_frame.T
|
| 171 |
+
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
|
| 172 |
+
fval1 = fval1.fillna(np.inf).values
|
| 173 |
+
|
| 174 |
+
exp0 = np.apply_along_axis(rankdata, 0, fval0)
|
| 175 |
+
exp1 = np.apply_along_axis(rankdata, 1, fval1)
|
| 176 |
+
|
| 177 |
+
tm.assert_almost_equal(ranks0.values, exp0)
|
| 178 |
+
tm.assert_almost_equal(ranks1.values, exp1)
|
| 179 |
+
|
| 180 |
+
# descending
|
| 181 |
+
|
| 182 |
+
# bottom
|
| 183 |
+
ranks0 = float_frame.rank(na_option="top", ascending=False)
|
| 184 |
+
ranks1 = float_frame.rank(1, na_option="top", ascending=False)
|
| 185 |
+
|
| 186 |
+
fvals = float_frame.fillna(np.inf).values
|
| 187 |
+
|
| 188 |
+
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
|
| 189 |
+
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
|
| 190 |
+
|
| 191 |
+
tm.assert_almost_equal(ranks0.values, exp0)
|
| 192 |
+
tm.assert_almost_equal(ranks1.values, exp1)
|
| 193 |
+
|
| 194 |
+
# descending
|
| 195 |
+
|
| 196 |
+
# top
|
| 197 |
+
ranks0 = float_frame.rank(na_option="bottom", ascending=False)
|
| 198 |
+
ranks1 = float_frame.rank(1, na_option="bottom", ascending=False)
|
| 199 |
+
|
| 200 |
+
fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
|
| 201 |
+
fval1 = float_frame.T
|
| 202 |
+
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
|
| 203 |
+
fval1 = fval1.fillna(np.inf).values
|
| 204 |
+
|
| 205 |
+
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
|
| 206 |
+
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
|
| 207 |
+
|
| 208 |
+
tm.assert_numpy_array_equal(ranks0.values, exp0)
|
| 209 |
+
tm.assert_numpy_array_equal(ranks1.values, exp1)
|
| 210 |
+
|
| 211 |
+
# bad values throw error
|
| 212 |
+
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
|
| 213 |
+
|
| 214 |
+
with pytest.raises(ValueError, match=msg):
|
| 215 |
+
float_frame.rank(na_option="bad", ascending=False)
|
| 216 |
+
|
| 217 |
+
# invalid type
|
| 218 |
+
with pytest.raises(ValueError, match=msg):
|
| 219 |
+
float_frame.rank(na_option=True, ascending=False)
|
| 220 |
+
|
| 221 |
+
def test_rank_axis(self):
|
| 222 |
+
# check if using axes' names gives the same result
|
| 223 |
+
df = DataFrame([[2, 1], [4, 3]])
|
| 224 |
+
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index"))
|
| 225 |
+
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns"))
|
| 226 |
+
|
| 227 |
+
@td.skip_if_no_scipy
def test_rank_methods_frame(self):
    # Compare every rank method on both axes against scipy's rankdata.
    import scipy.stats  # noqa:F401
    from scipy.stats import rankdata

    raw = np.random.randint(0, 21, (100, 26))
    raw = (raw - 10.0) / 10.0
    columns = [chr(ord("z") - i) for i in range(raw.shape[1])]

    # Exercise values of very different magnitudes.
    for vals in [raw, raw + 1e6, raw * 1e-6]:
        frame = DataFrame(vals, columns=columns)

        for axis in (0, 1):
            for method in ("average", "min", "max", "first", "dense"):
                result = frame.rank(axis=axis, method=method)
                # scipy calls pandas' "first" method "ordinal"
                scipy_method = "ordinal" if method == "first" else method
                sprank = np.apply_along_axis(rankdata, axis, vals, scipy_method)
                sprank = sprank.astype(np.float64)
                expected = DataFrame(sprank, columns=columns).astype("float64")
                tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
def test_rank_descending(self, method, dtype):
    # Descending rank must agree with ascending rank of (max - x).
    if "i" in dtype:
        # integer dtypes cannot hold NaN, so drop missing rows first
        frame = self.df.dropna().astype(dtype)
    else:
        frame = self.df.astype(dtype)

    res = frame.rank(ascending=False)
    tm.assert_frame_equal(res, (frame.max() - frame).rank())

    expected = (frame.max() - frame).rank(method=method)

    if dtype != "O":
        res2 = frame.rank(method=method, ascending=False, numeric_only=True)
        tm.assert_frame_equal(res2, expected)

    res3 = frame.rank(method=method, ascending=False, numeric_only=False)
    tm.assert_frame_equal(res3, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("dtype", [None, object])
def test_rank_2d_tie_methods(self, method, axis, dtype):
    # 2D rank with a tie method should match the 1D expected results
    # replicated across both columns (or rows, when axis=1).
    def _check2d(frame, expected, method="average", axis=0):
        want = DataFrame({"A": expected, "B": expected})
        if axis == 1:
            frame, want = frame.T, want.T
        tm.assert_frame_equal(frame.rank(method=method, axis=axis), want)

    obj = self.df if dtype is None else self.df.astype(dtype)
    _check2d(obj, self.results[method], method=method, axis=axis)
@pytest.mark.parametrize(
    "method,exp",
    [
        ("dense", [[1.0, 1.0, 1.0], [1.0, 0.5, 2.0 / 3], [1.0, 0.5, 1.0 / 3]]),
        (
            "min",
            [
                [1.0 / 3, 1.0, 1.0],
                [1.0 / 3, 1.0 / 3, 2.0 / 3],
                [1.0 / 3, 1.0 / 3, 1.0 / 3],
            ],
        ),
        (
            "max",
            [[1.0, 1.0, 1.0], [1.0, 2.0 / 3, 2.0 / 3], [1.0, 2.0 / 3, 1.0 / 3]],
        ),
        (
            "average",
            [[2.0 / 3, 1.0, 1.0], [2.0 / 3, 0.5, 2.0 / 3], [2.0 / 3, 0.5, 1.0 / 3]],
        ),
        (
            "first",
            [
                [1.0 / 3, 1.0, 1.0],
                [2.0 / 3, 1.0 / 3, 2.0 / 3],
                [3.0 / 3, 2.0 / 3, 1.0 / 3],
            ],
        ),
    ],
)
def test_rank_pct_true(self, method, exp):
    # see gh-15630.
    # pct=True should scale ranks by the group size for every tie method.
    frame = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])
    tm.assert_frame_equal(frame.rank(method=method, pct=True), DataFrame(exp))
@pytest.mark.single_cpu
def test_pct_max_many_rows(self):
    # GH 18271
    # With more than 2**24 rows the pct scaling must not lose precision:
    # the maximum percentile rank stays exactly 1.
    n = 2**24 + 1
    frame = DataFrame({"A": np.arange(n), "B": np.arange(n, 0, -1)})
    result = frame.rank(pct=True).max()
    assert (result == 1).all()
@pytest.mark.parametrize(
    "contents,dtype",
    [
        (
            [
                -np.inf,
                -50,
                -1,
                -1e-20,
                -1e-25,
                -1e-50,
                0,
                1e-40,
                1e-20,
                1e-10,
                2,
                40,
                np.inf,
            ],
            "float64",
        ),
        (
            [
                -np.inf,
                -50,
                -1,
                -1e-20,
                -1e-25,
                -1e-45,
                0,
                1e-40,
                1e-20,
                1e-10,
                2,
                40,
                np.inf,
            ],
            "float32",
        ),
        ([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"),
        (
            [
                np.iinfo(np.int64).min,
                -100,
                0,
                1,
                9999,
                100000,
                1e10,
                np.iinfo(np.int64).max,
            ],
            "int64",
        ),
        ([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"),
        (
            [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 5)],
            "datetime64",
        ),
    ],
)
def test_rank_inf_and_nan(self, contents, dtype, frame_or_series):
    dtype_na_map = {
        "float64": np.nan,
        "float32": np.nan,
        "object": None,
        "datetime64": np.datetime64("nat"),
    }
    # Insert nans at random positions if underlying dtype has missing
    # value. Then adjust the expected order by adding nans accordingly
    # This is for testing whether rank calculation is affected
    # when values are interwined with nan values.
    data = np.array(contents, dtype=dtype)
    order = np.array(range(len(data)), dtype="float64") + 1.0
    if dtype in dtype_na_map:
        na_value = dtype_na_map[dtype]
        na_positions = np.random.choice(range(len(data)), 5)
        data = np.insert(data, na_positions, na_value)
        order = np.insert(order, na_positions, np.nan)

    # Shuffle the testing array and expected results in the same way
    shuffle = np.random.permutation(len(data))
    obj = frame_or_series(data[shuffle])
    expected = frame_or_series(order[shuffle], dtype="float64")
    tm.assert_equal(obj.rank(), expected)
def test_df_series_inf_nan_consistency(self):
|
| 422 |
+
# GH#32593
|
| 423 |
+
index = [5, 4, 3, 2, 1, 6, 7, 8, 9, 10]
|
| 424 |
+
col1 = [5, 4, 3, 5, 8, 5, 2, 1, 6, 6]
|
| 425 |
+
col2 = [5, 4, np.nan, 5, 8, 5, np.inf, np.nan, 6, -np.inf]
|
| 426 |
+
df = DataFrame(
|
| 427 |
+
data={
|
| 428 |
+
"col1": col1,
|
| 429 |
+
"col2": col2,
|
| 430 |
+
},
|
| 431 |
+
index=index,
|
| 432 |
+
dtype="f8",
|
| 433 |
+
)
|
| 434 |
+
df_result = df.rank()
|
| 435 |
+
|
| 436 |
+
series_result = df.copy()
|
| 437 |
+
series_result["col1"] = df["col1"].rank()
|
| 438 |
+
series_result["col2"] = df["col2"].rank()
|
| 439 |
+
|
| 440 |
+
tm.assert_frame_equal(df_result, series_result)
|
| 441 |
+
|
| 442 |
+
def test_rank_both_inf(self):
|
| 443 |
+
# GH#32593
|
| 444 |
+
df = DataFrame({"a": [-np.inf, 0, np.inf]})
|
| 445 |
+
expected = DataFrame({"a": [1.0, 2.0, 3.0]})
|
| 446 |
+
result = df.rank()
|
| 447 |
+
tm.assert_frame_equal(result, expected)
|
| 448 |
+
|
| 449 |
+
@pytest.mark.parametrize(
    "na_option,ascending,expected",
    [
        ("top", True, [3.0, 1.0, 2.0]),
        ("top", False, [2.0, 1.0, 3.0]),
        ("bottom", True, [2.0, 3.0, 1.0]),
        ("bottom", False, [1.0, 3.0, 2.0]),
    ],
)
def test_rank_inf_nans_na_option(
    self, frame_or_series, method, na_option, ascending, expected
):
    # NaN placement via na_option must compose correctly with +/-inf values.
    obj = frame_or_series([np.inf, np.nan, -np.inf])
    result = obj.rank(method=method, na_option=na_option, ascending=ascending)
    tm.assert_equal(result, frame_or_series(expected))
@pytest.mark.parametrize(
    "na_option,ascending,expected",
    [
        ("bottom", True, [1.0, 2.0, 4.0, 3.0]),
        ("bottom", False, [1.0, 2.0, 4.0, 3.0]),
        ("top", True, [2.0, 3.0, 1.0, 4.0]),
        ("top", False, [2.0, 3.0, 1.0, 4.0]),
    ],
)
def test_rank_object_first(self, frame_or_series, na_option, ascending, expected):
    # method="first" on object dtype: ties broken by position, None placed
    # according to na_option.
    obj = frame_or_series(["foo", "foo", None, "foo"])
    result = obj.rank(method="first", na_option=na_option, ascending=ascending)
    tm.assert_equal(result, frame_or_series(expected))
@pytest.mark.parametrize(
    "data,expected",
    [
        ({"a": [1, 2, "a"], "b": [4, 5, 6]}, DataFrame({"b": [1.0, 2.0, 3.0]})),
        ({"a": [1, 2, "a"]}, DataFrame(index=range(3), columns=[])),
    ],
)
def test_rank_mixed_axis_zero(self, data, expected):
    frame = DataFrame(data)
    # A mixed int/str column is unorderable, so the default rank raises;
    # numeric_only=True drops it and ranks the rest.
    with pytest.raises(TypeError, match="'<' not supported between instances of"):
        frame.rank()
    tm.assert_frame_equal(frame.rank(numeric_only=True), expected)