ZTWHHH commited on
Commit
07b7a81
·
verified ·
1 Parent(s): 115a9f8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. videochat2/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_constructors.cpython-310.pyc +3 -0
  3. videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc +0 -0
  4. videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_datetime.cpython-310.pyc +0 -0
  5. videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc +0 -0
  6. videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc +0 -0
  7. videochat2/lib/python3.10/site-packages/pandas/tests/internals/__init__.py +0 -0
  8. videochat2/lib/python3.10/site-packages/pandas/tests/internals/test_api.py +55 -0
  9. videochat2/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py +1437 -0
  10. videochat2/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py +70 -0
  11. videochat2/lib/python3.10/site-packages/pandas/tests/io/data/parquet/simple.parquet +3 -0
  12. videochat2/lib/python3.10/site-packages/pandas/tests/io/data/pickle/test_mi_py27.pkl +3 -0
  13. videochat2/lib/python3.10/site-packages/pandas/tests/io/data/pickle/test_py27.pkl +3 -0
  14. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py +0 -0
  15. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py +0 -0
  16. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc +0 -0
  17. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append.cpython-310.pyc +0 -0
  19. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc +0 -0
  21. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc +0 -0
  22. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc +0 -0
  23. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc +0 -0
  24. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc +0 -0
  25. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc +0 -0
  26. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc +0 -0
  27. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc +0 -0
  29. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py +7 -0
  30. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py +377 -0
  31. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py +749 -0
  32. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py +253 -0
  33. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py +787 -0
  34. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py +230 -0
  35. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py +540 -0
  36. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py +287 -0
  37. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py +466 -0
  38. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py +54 -0
  39. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py +153 -0
  40. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py +118 -0
  41. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__init__.py +0 -0
  42. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_join.cpython-310.pyc +0 -0
  43. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge.cpython-310.pyc +0 -0
  44. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_cross.cpython-310.pyc +0 -0
  45. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_index_as_string.cpython-310.pyc +0 -0
  46. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_ordered.cpython-310.pyc +0 -0
  47. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_multi.cpython-310.pyc +0 -0
  48. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py +994 -0
  49. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py +2781 -0
  50. videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py +1591 -0
.gitattributes CHANGED
@@ -1284,3 +1284,4 @@ videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpyt
1284
  videochat2/lib/python3.10/site-packages/pandas/io/sas/_sas.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1285
  videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1286
  videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1284
  videochat2/lib/python3.10/site-packages/pandas/io/sas/_sas.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1285
  videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1286
  videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1287
+ videochat2/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_constructors.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_constructors.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c5945616be3a4d0d714c99dc9e697c7083ec978885c7332374d77c8f60f15e1
3
+ size 108834
videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_datetime.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc ADDED
Binary file (5.75 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc ADDED
Binary file (2.57 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/internals/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/internals/test_api.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the pseudo-public API implemented in internals/api.py and exposed
3
+ in core.internals
4
+ """
5
+
6
+ import pandas as pd
7
+ from pandas.core import internals
8
+ from pandas.core.internals import api
9
+
10
+
11
+ def test_internals_api():
12
+ assert internals.make_block is api.make_block
13
+
14
+
15
+ def test_namespace():
16
+ # SUBJECT TO CHANGE
17
+
18
+ modules = [
19
+ "blocks",
20
+ "concat",
21
+ "managers",
22
+ "construction",
23
+ "array_manager",
24
+ "base",
25
+ "api",
26
+ "ops",
27
+ ]
28
+ expected = [
29
+ "Block",
30
+ "NumericBlock",
31
+ "DatetimeTZBlock",
32
+ "ExtensionBlock",
33
+ "ObjectBlock",
34
+ "make_block",
35
+ "DataManager",
36
+ "ArrayManager",
37
+ "BlockManager",
38
+ "SingleDataManager",
39
+ "SingleBlockManager",
40
+ "SingleArrayManager",
41
+ "concatenate_managers",
42
+ "create_block_manager_from_blocks",
43
+ ]
44
+
45
+ result = [x for x in dir(internals) if not x.startswith("__")]
46
+ assert set(result) == set(expected + modules)
47
+
48
+
49
+ def test_make_block_2d_with_dti():
50
+ # GH#41168
51
+ dti = pd.date_range("2012", periods=3, tz="UTC")
52
+ blk = api.make_block(dti, placement=[0])
53
+
54
+ assert blk.shape == (1, 3)
55
+ assert blk.values.shape == (1, 3)
videochat2/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py ADDED
@@ -0,0 +1,1437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date,
3
+ datetime,
4
+ )
5
+ import itertools
6
+ import re
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas._libs.internals import BlockPlacement
12
+ from pandas.compat import IS64
13
+ import pandas.util._test_decorators as td
14
+
15
+ from pandas.core.dtypes.common import is_scalar
16
+
17
+ import pandas as pd
18
+ from pandas import (
19
+ Categorical,
20
+ DataFrame,
21
+ DatetimeIndex,
22
+ Index,
23
+ IntervalIndex,
24
+ Series,
25
+ Timedelta,
26
+ Timestamp,
27
+ period_range,
28
+ )
29
+ import pandas._testing as tm
30
+ import pandas.core.algorithms as algos
31
+ from pandas.core.arrays import (
32
+ DatetimeArray,
33
+ SparseArray,
34
+ TimedeltaArray,
35
+ )
36
+ from pandas.core.internals import (
37
+ BlockManager,
38
+ SingleBlockManager,
39
+ make_block,
40
+ )
41
+ from pandas.core.internals.blocks import (
42
+ ensure_block_shape,
43
+ new_block,
44
+ )
45
+
46
+ # this file contains BlockManager specific tests
47
+ # TODO(ArrayManager) factor out interleave_dtype tests
48
+ pytestmark = td.skip_array_manager_invalid_test
49
+
50
+
51
+ @pytest.fixture(params=[new_block, make_block])
52
+ def block_maker(request):
53
+ """
54
+ Fixture to test both the internal new_block and pseudo-public make_block.
55
+ """
56
+ return request.param
57
+
58
+
59
+ @pytest.fixture
60
+ def mgr():
61
+ return create_mgr(
62
+ "a: f8; b: object; c: f8; d: object; e: f8;"
63
+ "f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;"
64
+ "k: M8[ns, US/Eastern]; l: M8[ns, CET];"
65
+ )
66
+
67
+
68
+ def assert_block_equal(left, right):
69
+ tm.assert_numpy_array_equal(left.values, right.values)
70
+ assert left.dtype == right.dtype
71
+ assert isinstance(left.mgr_locs, BlockPlacement)
72
+ assert isinstance(right.mgr_locs, BlockPlacement)
73
+ tm.assert_numpy_array_equal(left.mgr_locs.as_array, right.mgr_locs.as_array)
74
+
75
+
76
+ def get_numeric_mat(shape):
77
+ arr = np.arange(shape[0])
78
+ return np.lib.stride_tricks.as_strided(
79
+ x=arr, shape=shape, strides=(arr.itemsize,) + (0,) * (len(shape) - 1)
80
+ ).copy()
81
+
82
+
83
+ N = 10
84
+
85
+
86
+ def create_block(typestr, placement, item_shape=None, num_offset=0, maker=new_block):
87
+ """
88
+ Supported typestr:
89
+
90
+ * float, f8, f4, f2
91
+ * int, i8, i4, i2, i1
92
+ * uint, u8, u4, u2, u1
93
+ * complex, c16, c8
94
+ * bool
95
+ * object, string, O
96
+ * datetime, dt, M8[ns], M8[ns, tz]
97
+ * timedelta, td, m8[ns]
98
+ * sparse (SparseArray with fill_value=0.0)
99
+ * sparse_na (SparseArray with fill_value=np.nan)
100
+ * category, category2
101
+
102
+ """
103
+ placement = BlockPlacement(placement)
104
+ num_items = len(placement)
105
+
106
+ if item_shape is None:
107
+ item_shape = (N,)
108
+
109
+ shape = (num_items,) + item_shape
110
+
111
+ mat = get_numeric_mat(shape)
112
+
113
+ if typestr in (
114
+ "float",
115
+ "f8",
116
+ "f4",
117
+ "f2",
118
+ "int",
119
+ "i8",
120
+ "i4",
121
+ "i2",
122
+ "i1",
123
+ "uint",
124
+ "u8",
125
+ "u4",
126
+ "u2",
127
+ "u1",
128
+ ):
129
+ values = mat.astype(typestr) + num_offset
130
+ elif typestr in ("complex", "c16", "c8"):
131
+ values = 1.0j * (mat.astype(typestr) + num_offset)
132
+ elif typestr in ("object", "string", "O"):
133
+ values = np.reshape([f"A{i:d}" for i in mat.ravel() + num_offset], shape)
134
+ elif typestr in ("b", "bool"):
135
+ values = np.ones(shape, dtype=np.bool_)
136
+ elif typestr in ("datetime", "dt", "M8[ns]"):
137
+ values = (mat * 1e9).astype("M8[ns]")
138
+ elif typestr.startswith("M8[ns"):
139
+ # datetime with tz
140
+ m = re.search(r"M8\[ns,\s*(\w+\/?\w*)\]", typestr)
141
+ assert m is not None, f"incompatible typestr -> {typestr}"
142
+ tz = m.groups()[0]
143
+ assert num_items == 1, "must have only 1 num items for a tz-aware"
144
+ values = DatetimeIndex(np.arange(N) * 10**9, tz=tz)._data
145
+ values = ensure_block_shape(values, ndim=len(shape))
146
+ elif typestr in ("timedelta", "td", "m8[ns]"):
147
+ values = (mat * 1).astype("m8[ns]")
148
+ elif typestr in ("category",):
149
+ values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
150
+ elif typestr in ("category2",):
151
+ values = Categorical(["a", "a", "a", "a", "b", "b", "c", "c", "c", "d"])
152
+ elif typestr in ("sparse", "sparse_na"):
153
+ if shape[-1] != 10:
154
+ # We also are implicitly assuming this in the category cases above
155
+ raise NotImplementedError
156
+
157
+ assert all(s == 1 for s in shape[:-1])
158
+ if typestr.endswith("_na"):
159
+ fill_value = np.nan
160
+ else:
161
+ fill_value = 0.0
162
+ values = SparseArray(
163
+ [fill_value, fill_value, 1, 2, 3, fill_value, 4, 5, fill_value, 6],
164
+ fill_value=fill_value,
165
+ )
166
+ arr = values.sp_values.view()
167
+ arr += num_offset - 1
168
+ else:
169
+ raise ValueError(f'Unsupported typestr: "{typestr}"')
170
+
171
+ return maker(values, placement=placement, ndim=len(shape))
172
+
173
+
174
+ def create_single_mgr(typestr, num_rows=None):
175
+ if num_rows is None:
176
+ num_rows = N
177
+
178
+ return SingleBlockManager(
179
+ create_block(typestr, placement=slice(0, num_rows), item_shape=()),
180
+ Index(np.arange(num_rows)),
181
+ )
182
+
183
+
184
+ def create_mgr(descr, item_shape=None):
185
+ """
186
+ Construct BlockManager from string description.
187
+
188
+ String description syntax looks similar to np.matrix initializer. It looks
189
+ like this::
190
+
191
+ a,b,c: f8; d,e,f: i8
192
+
193
+ Rules are rather simple:
194
+
195
+ * see list of supported datatypes in `create_block` method
196
+ * components are semicolon-separated
197
+ * each component is `NAME,NAME,NAME: DTYPE_ID`
198
+ * whitespace around colons & semicolons are removed
199
+ * components with same DTYPE_ID are combined into single block
200
+ * to force multiple blocks with same dtype, use '-SUFFIX'::
201
+
202
+ 'a:f8-1; b:f8-2; c:f8-foobar'
203
+
204
+ """
205
+ if item_shape is None:
206
+ item_shape = (N,)
207
+
208
+ offset = 0
209
+ mgr_items = []
210
+ block_placements = {}
211
+ for d in descr.split(";"):
212
+ d = d.strip()
213
+ if not len(d):
214
+ continue
215
+ names, blockstr = d.partition(":")[::2]
216
+ blockstr = blockstr.strip()
217
+ names = names.strip().split(",")
218
+
219
+ mgr_items.extend(names)
220
+ placement = list(np.arange(len(names)) + offset)
221
+ try:
222
+ block_placements[blockstr].extend(placement)
223
+ except KeyError:
224
+ block_placements[blockstr] = placement
225
+ offset += len(names)
226
+
227
+ mgr_items = Index(mgr_items)
228
+
229
+ blocks = []
230
+ num_offset = 0
231
+ for blockstr, placement in block_placements.items():
232
+ typestr = blockstr.split("-")[0]
233
+ blocks.append(
234
+ create_block(
235
+ typestr, placement, item_shape=item_shape, num_offset=num_offset
236
+ )
237
+ )
238
+ num_offset += len(placement)
239
+
240
+ sblocks = sorted(blocks, key=lambda b: b.mgr_locs[0])
241
+ return BlockManager(
242
+ tuple(sblocks),
243
+ [mgr_items] + [Index(np.arange(n)) for n in item_shape],
244
+ )
245
+
246
+
247
+ @pytest.fixture
248
+ def fblock():
249
+ return create_block("float", [0, 2, 4])
250
+
251
+
252
+ class TestBlock:
253
+ def test_constructor(self):
254
+ int32block = create_block("i4", [0])
255
+ assert int32block.dtype == np.int32
256
+
257
+ @pytest.mark.parametrize(
258
+ "typ, data",
259
+ [
260
+ ["float", [0, 2, 4]],
261
+ ["complex", [7]],
262
+ ["object", [1, 3]],
263
+ ["bool", [5]],
264
+ ],
265
+ )
266
+ def test_pickle(self, typ, data):
267
+ blk = create_block(typ, data)
268
+ assert_block_equal(tm.round_trip_pickle(blk), blk)
269
+
270
+ def test_mgr_locs(self, fblock):
271
+ assert isinstance(fblock.mgr_locs, BlockPlacement)
272
+ tm.assert_numpy_array_equal(
273
+ fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.intp)
274
+ )
275
+
276
+ def test_attrs(self, fblock):
277
+ assert fblock.shape == fblock.values.shape
278
+ assert fblock.dtype == fblock.values.dtype
279
+ assert len(fblock) == len(fblock.values)
280
+
281
+ def test_copy(self, fblock):
282
+ cop = fblock.copy()
283
+ assert cop is not fblock
284
+ assert_block_equal(fblock, cop)
285
+
286
+ def test_delete(self, fblock):
287
+ newb = fblock.copy()
288
+ locs = newb.mgr_locs
289
+ nb = newb.delete(0)[0]
290
+ assert newb.mgr_locs is locs
291
+
292
+ assert nb is not newb
293
+
294
+ tm.assert_numpy_array_equal(
295
+ nb.mgr_locs.as_array, np.array([2, 4], dtype=np.intp)
296
+ )
297
+ assert not (newb.values[0] == 1).all()
298
+ assert (nb.values[0] == 1).all()
299
+
300
+ newb = fblock.copy()
301
+ locs = newb.mgr_locs
302
+ nb = newb.delete(1)
303
+ assert len(nb) == 2
304
+ assert newb.mgr_locs is locs
305
+
306
+ tm.assert_numpy_array_equal(
307
+ nb[0].mgr_locs.as_array, np.array([0], dtype=np.intp)
308
+ )
309
+ tm.assert_numpy_array_equal(
310
+ nb[1].mgr_locs.as_array, np.array([4], dtype=np.intp)
311
+ )
312
+ assert not (newb.values[1] == 2).all()
313
+ assert (nb[1].values[0] == 2).all()
314
+
315
+ newb = fblock.copy()
316
+ nb = newb.delete(2)
317
+ assert len(nb) == 1
318
+ tm.assert_numpy_array_equal(
319
+ nb[0].mgr_locs.as_array, np.array([0, 2], dtype=np.intp)
320
+ )
321
+ assert (nb[0].values[1] == 1).all()
322
+
323
+ newb = fblock.copy()
324
+
325
+ with pytest.raises(IndexError, match=None):
326
+ newb.delete(3)
327
+
328
+ def test_delete_datetimelike(self):
329
+ # dont use np.delete on values, as that will coerce from DTA/TDA to ndarray
330
+ arr = np.arange(20, dtype="i8").reshape(5, 4).view("m8[ns]")
331
+ df = DataFrame(arr)
332
+ blk = df._mgr.blocks[0]
333
+ assert isinstance(blk.values, TimedeltaArray)
334
+
335
+ nb = blk.delete(1)
336
+ assert len(nb) == 2
337
+ assert isinstance(nb[0].values, TimedeltaArray)
338
+ assert isinstance(nb[1].values, TimedeltaArray)
339
+
340
+ df = DataFrame(arr.view("M8[ns]"))
341
+ blk = df._mgr.blocks[0]
342
+ assert isinstance(blk.values, DatetimeArray)
343
+
344
+ nb = blk.delete([1, 3])
345
+ assert len(nb) == 2
346
+ assert isinstance(nb[0].values, DatetimeArray)
347
+ assert isinstance(nb[1].values, DatetimeArray)
348
+
349
+ def test_split(self):
350
+ # GH#37799
351
+ values = np.random.randn(3, 4)
352
+ blk = new_block(values, placement=[3, 1, 6], ndim=2)
353
+ result = blk._split()
354
+
355
+ # check that we get views, not copies
356
+ values[:] = -9999
357
+ assert (blk.values == -9999).all()
358
+
359
+ assert len(result) == 3
360
+ expected = [
361
+ new_block(values[[0]], placement=[3], ndim=2),
362
+ new_block(values[[1]], placement=[1], ndim=2),
363
+ new_block(values[[2]], placement=[6], ndim=2),
364
+ ]
365
+ for res, exp in zip(result, expected):
366
+ assert_block_equal(res, exp)
367
+
368
+
369
+ class TestBlockManager:
370
+ def test_attrs(self):
371
+ mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2")
372
+ assert mgr.nblocks == 2
373
+ assert len(mgr) == 6
374
+
375
+ def test_duplicate_ref_loc_failure(self):
376
+ tmp_mgr = create_mgr("a:bool; a: f8")
377
+
378
+ axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
379
+
380
+ blocks[0].mgr_locs = BlockPlacement(np.array([0]))
381
+ blocks[1].mgr_locs = BlockPlacement(np.array([0]))
382
+
383
+ # test trying to create block manager with overlapping ref locs
384
+
385
+ msg = "Gaps in blk ref_locs"
386
+
387
+ with pytest.raises(AssertionError, match=msg):
388
+ mgr = BlockManager(blocks, axes)
389
+ mgr._rebuild_blknos_and_blklocs()
390
+
391
+ blocks[0].mgr_locs = BlockPlacement(np.array([0]))
392
+ blocks[1].mgr_locs = BlockPlacement(np.array([1]))
393
+ mgr = BlockManager(blocks, axes)
394
+ mgr.iget(1)
395
+
396
+ def test_pickle(self, mgr):
397
+ mgr2 = tm.round_trip_pickle(mgr)
398
+ tm.assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
399
+
400
+ # GH2431
401
+ assert hasattr(mgr2, "_is_consolidated")
402
+ assert hasattr(mgr2, "_known_consolidated")
403
+
404
+ # reset to False on load
405
+ assert not mgr2._is_consolidated
406
+ assert not mgr2._known_consolidated
407
+
408
+ @pytest.mark.parametrize("mgr_string", ["a,a,a:f8", "a: f8; a: i8"])
409
+ def test_non_unique_pickle(self, mgr_string):
410
+ mgr = create_mgr(mgr_string)
411
+ mgr2 = tm.round_trip_pickle(mgr)
412
+ tm.assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
413
+
414
+ def test_categorical_block_pickle(self):
415
+ mgr = create_mgr("a: category")
416
+ mgr2 = tm.round_trip_pickle(mgr)
417
+ tm.assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
418
+
419
+ smgr = create_single_mgr("category")
420
+ smgr2 = tm.round_trip_pickle(smgr)
421
+ tm.assert_series_equal(Series(smgr), Series(smgr2))
422
+
423
+ def test_iget(self):
424
+ cols = Index(list("abc"))
425
+ values = np.random.rand(3, 3)
426
+ block = new_block(
427
+ values=values.copy(),
428
+ placement=np.arange(3, dtype=np.intp),
429
+ ndim=values.ndim,
430
+ )
431
+ mgr = BlockManager(blocks=(block,), axes=[cols, Index(np.arange(3))])
432
+
433
+ tm.assert_almost_equal(mgr.iget(0).internal_values(), values[0])
434
+ tm.assert_almost_equal(mgr.iget(1).internal_values(), values[1])
435
+ tm.assert_almost_equal(mgr.iget(2).internal_values(), values[2])
436
+
437
+ def test_set(self):
438
+ mgr = create_mgr("a,b,c: int", item_shape=(3,))
439
+
440
+ mgr.insert(len(mgr.items), "d", np.array(["foo"] * 3))
441
+ mgr.iset(1, np.array(["bar"] * 3))
442
+ tm.assert_numpy_array_equal(mgr.iget(0).internal_values(), np.array([0] * 3))
443
+ tm.assert_numpy_array_equal(
444
+ mgr.iget(1).internal_values(), np.array(["bar"] * 3, dtype=np.object_)
445
+ )
446
+ tm.assert_numpy_array_equal(mgr.iget(2).internal_values(), np.array([2] * 3))
447
+ tm.assert_numpy_array_equal(
448
+ mgr.iget(3).internal_values(), np.array(["foo"] * 3, dtype=np.object_)
449
+ )
450
+
451
+ def test_set_change_dtype(self, mgr):
452
+ mgr.insert(len(mgr.items), "baz", np.zeros(N, dtype=bool))
453
+
454
+ mgr.iset(mgr.items.get_loc("baz"), np.repeat("foo", N))
455
+ idx = mgr.items.get_loc("baz")
456
+ assert mgr.iget(idx).dtype == np.object_
457
+
458
+ mgr2 = mgr.consolidate()
459
+ mgr2.iset(mgr2.items.get_loc("baz"), np.repeat("foo", N))
460
+ idx = mgr2.items.get_loc("baz")
461
+ assert mgr2.iget(idx).dtype == np.object_
462
+
463
+ mgr2.insert(len(mgr2.items), "quux", np.random.randn(N).astype(int))
464
+ idx = mgr2.items.get_loc("quux")
465
+ assert mgr2.iget(idx).dtype == np.int_
466
+
467
+ mgr2.iset(mgr2.items.get_loc("quux"), np.random.randn(N))
468
+ assert mgr2.iget(idx).dtype == np.float_
469
+
470
+ def test_copy(self, mgr):
471
+ cp = mgr.copy(deep=False)
472
+ for blk, cp_blk in zip(mgr.blocks, cp.blocks):
473
+ # view assertion
474
+ tm.assert_equal(cp_blk.values, blk.values)
475
+ if isinstance(blk.values, np.ndarray):
476
+ assert cp_blk.values.base is blk.values.base
477
+ else:
478
+ # DatetimeTZBlock has DatetimeIndex values
479
+ assert cp_blk.values._ndarray.base is blk.values._ndarray.base
480
+
481
+ # copy(deep=True) consolidates, so the block-wise assertions will
482
+ # fail is mgr is not consolidated
483
+ mgr._consolidate_inplace()
484
+ cp = mgr.copy(deep=True)
485
+ for blk, cp_blk in zip(mgr.blocks, cp.blocks):
486
+ bvals = blk.values
487
+ cpvals = cp_blk.values
488
+
489
+ tm.assert_equal(cpvals, bvals)
490
+
491
+ if isinstance(cpvals, np.ndarray):
492
+ lbase = cpvals.base
493
+ rbase = bvals.base
494
+ else:
495
+ lbase = cpvals._ndarray.base
496
+ rbase = bvals._ndarray.base
497
+
498
+ # copy assertion we either have a None for a base or in case of
499
+ # some blocks it is an array (e.g. datetimetz), but was copied
500
+ if isinstance(cpvals, DatetimeArray):
501
+ assert (lbase is None and rbase is None) or (lbase is not rbase)
502
+ elif not isinstance(cpvals, np.ndarray):
503
+ assert lbase is not rbase
504
+ else:
505
+ assert lbase is None and rbase is None
506
+
507
+ def test_sparse(self):
508
+ mgr = create_mgr("a: sparse-1; b: sparse-2")
509
+ assert mgr.as_array().dtype == np.float64
510
+
511
+ def test_sparse_mixed(self):
512
+ mgr = create_mgr("a: sparse-1; b: sparse-2; c: f8")
513
+ assert len(mgr.blocks) == 3
514
+ assert isinstance(mgr, BlockManager)
515
+
516
+ @pytest.mark.parametrize(
517
+ "mgr_string, dtype",
518
+ [("c: f4; d: f2", np.float32), ("c: f4; d: f2; e: f8", np.float64)],
519
+ )
520
+ def test_as_array_float(self, mgr_string, dtype):
521
+ mgr = create_mgr(mgr_string)
522
+ assert mgr.as_array().dtype == dtype
523
+
524
+ @pytest.mark.parametrize(
525
+ "mgr_string, dtype",
526
+ [
527
+ ("a: bool-1; b: bool-2", np.bool_),
528
+ ("a: i8-1; b: i8-2; c: i4; d: i2; e: u1", np.int64),
529
+ ("c: i4; d: i2; e: u1", np.int32),
530
+ ],
531
+ )
532
+ def test_as_array_int_bool(self, mgr_string, dtype):
533
+ mgr = create_mgr(mgr_string)
534
+ assert mgr.as_array().dtype == dtype
535
+
536
+ def test_as_array_datetime(self):
537
+ mgr = create_mgr("h: datetime-1; g: datetime-2")
538
+ assert mgr.as_array().dtype == "M8[ns]"
539
+
540
+ def test_as_array_datetime_tz(self):
541
+ mgr = create_mgr("h: M8[ns, US/Eastern]; g: M8[ns, CET]")
542
+ assert mgr.iget(0).dtype == "datetime64[ns, US/Eastern]"
543
+ assert mgr.iget(1).dtype == "datetime64[ns, CET]"
544
+ assert mgr.as_array().dtype == "object"
545
+
546
+ @pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"])
547
+ def test_astype(self, t):
548
+ # coerce all
549
+ mgr = create_mgr("c: f4; d: f2; e: f8")
550
+
551
+ t = np.dtype(t)
552
+ tmgr = mgr.astype(t)
553
+ assert tmgr.iget(0).dtype.type == t
554
+ assert tmgr.iget(1).dtype.type == t
555
+ assert tmgr.iget(2).dtype.type == t
556
+
557
+ # mixed
558
+ mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8")
559
+
560
+ t = np.dtype(t)
561
+ tmgr = mgr.astype(t, errors="ignore")
562
+ assert tmgr.iget(2).dtype.type == t
563
+ assert tmgr.iget(4).dtype.type == t
564
+ assert tmgr.iget(5).dtype.type == t
565
+ assert tmgr.iget(6).dtype.type == t
566
+
567
+ assert tmgr.iget(0).dtype.type == np.object_
568
+ assert tmgr.iget(1).dtype.type == np.object_
569
+ if t != np.int64:
570
+ assert tmgr.iget(3).dtype.type == np.datetime64
571
+ else:
572
+ assert tmgr.iget(3).dtype.type == t
573
+
574
+ def test_convert(self):
575
+ def _compare(old_mgr, new_mgr):
576
+ """compare the blocks, numeric compare ==, object don't"""
577
+ old_blocks = set(old_mgr.blocks)
578
+ new_blocks = set(new_mgr.blocks)
579
+ assert len(old_blocks) == len(new_blocks)
580
+
581
+ # compare non-numeric
582
+ for b in old_blocks:
583
+ found = False
584
+ for nb in new_blocks:
585
+ if (b.values == nb.values).all():
586
+ found = True
587
+ break
588
+ assert found
589
+
590
+ for b in new_blocks:
591
+ found = False
592
+ for ob in old_blocks:
593
+ if (b.values == ob.values).all():
594
+ found = True
595
+ break
596
+ assert found
597
+
598
+ # noops
599
+ mgr = create_mgr("f: i8; g: f8")
600
+ new_mgr = mgr.convert(copy=True)
601
+ _compare(mgr, new_mgr)
602
+
603
+ # convert
604
+ mgr = create_mgr("a,b,foo: object; f: i8; g: f8")
605
+ mgr.iset(0, np.array(["1"] * N, dtype=np.object_))
606
+ mgr.iset(1, np.array(["2."] * N, dtype=np.object_))
607
+ mgr.iset(2, np.array(["foo."] * N, dtype=np.object_))
608
+ new_mgr = mgr.convert(copy=True)
609
+ assert new_mgr.iget(0).dtype == np.object_
610
+ assert new_mgr.iget(1).dtype == np.object_
611
+ assert new_mgr.iget(2).dtype == np.object_
612
+ assert new_mgr.iget(3).dtype == np.int64
613
+ assert new_mgr.iget(4).dtype == np.float64
614
+
615
+ mgr = create_mgr(
616
+ "a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2"
617
+ )
618
+ mgr.iset(0, np.array(["1"] * N, dtype=np.object_))
619
+ mgr.iset(1, np.array(["2."] * N, dtype=np.object_))
620
+ mgr.iset(2, np.array(["foo."] * N, dtype=np.object_))
621
+ new_mgr = mgr.convert(copy=True)
622
+ assert new_mgr.iget(0).dtype == np.object_
623
+ assert new_mgr.iget(1).dtype == np.object_
624
+ assert new_mgr.iget(2).dtype == np.object_
625
+ assert new_mgr.iget(3).dtype == np.int32
626
+ assert new_mgr.iget(4).dtype == np.bool_
627
+ assert new_mgr.iget(5).dtype.type, np.datetime64
628
+ assert new_mgr.iget(6).dtype == np.int64
629
+ assert new_mgr.iget(7).dtype == np.float64
630
+ assert new_mgr.iget(8).dtype == np.float16
631
+
632
+ def test_invalid_ea_block(self):
633
+ with pytest.raises(ValueError, match="need to split"):
634
+ create_mgr("a: category; b: category")
635
+
636
+ with pytest.raises(ValueError, match="need to split"):
637
+ create_mgr("a: category2; b: category2")
638
+
639
+ def test_interleave(self):
640
+ # self
641
+ for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]:
642
+ mgr = create_mgr(f"a: {dtype}")
643
+ assert mgr.as_array().dtype == dtype
644
+ mgr = create_mgr(f"a: {dtype}; b: {dtype}")
645
+ assert mgr.as_array().dtype == dtype
646
+
647
+ @pytest.mark.parametrize(
648
+ "mgr_string, dtype",
649
+ [
650
+ ("a: category", "i8"),
651
+ ("a: category; b: category", "i8"),
652
+ ("a: category; b: category2", "object"),
653
+ ("a: category2", "object"),
654
+ ("a: category2; b: category2", "object"),
655
+ ("a: f8", "f8"),
656
+ ("a: f8; b: i8", "f8"),
657
+ ("a: f4; b: i8", "f8"),
658
+ ("a: f4; b: i8; d: object", "object"),
659
+ ("a: bool; b: i8", "object"),
660
+ ("a: complex", "complex"),
661
+ ("a: f8; b: category", "object"),
662
+ ("a: M8[ns]; b: category", "object"),
663
+ ("a: M8[ns]; b: bool", "object"),
664
+ ("a: M8[ns]; b: i8", "object"),
665
+ ("a: m8[ns]; b: bool", "object"),
666
+ ("a: m8[ns]; b: i8", "object"),
667
+ ("a: M8[ns]; b: m8[ns]", "object"),
668
+ ],
669
+ )
670
+ def test_interleave_dtype(self, mgr_string, dtype):
671
+ # will be converted according the actual dtype of the underlying
672
+ mgr = create_mgr("a: category")
673
+ assert mgr.as_array().dtype == "i8"
674
+ mgr = create_mgr("a: category; b: category2")
675
+ assert mgr.as_array().dtype == "object"
676
+ mgr = create_mgr("a: category2")
677
+ assert mgr.as_array().dtype == "object"
678
+
679
+ # combinations
680
+ mgr = create_mgr("a: f8")
681
+ assert mgr.as_array().dtype == "f8"
682
+ mgr = create_mgr("a: f8; b: i8")
683
+ assert mgr.as_array().dtype == "f8"
684
+ mgr = create_mgr("a: f4; b: i8")
685
+ assert mgr.as_array().dtype == "f8"
686
+ mgr = create_mgr("a: f4; b: i8; d: object")
687
+ assert mgr.as_array().dtype == "object"
688
+ mgr = create_mgr("a: bool; b: i8")
689
+ assert mgr.as_array().dtype == "object"
690
+ mgr = create_mgr("a: complex")
691
+ assert mgr.as_array().dtype == "complex"
692
+ mgr = create_mgr("a: f8; b: category")
693
+ assert mgr.as_array().dtype == "f8"
694
+ mgr = create_mgr("a: M8[ns]; b: category")
695
+ assert mgr.as_array().dtype == "object"
696
+ mgr = create_mgr("a: M8[ns]; b: bool")
697
+ assert mgr.as_array().dtype == "object"
698
+ mgr = create_mgr("a: M8[ns]; b: i8")
699
+ assert mgr.as_array().dtype == "object"
700
+ mgr = create_mgr("a: m8[ns]; b: bool")
701
+ assert mgr.as_array().dtype == "object"
702
+ mgr = create_mgr("a: m8[ns]; b: i8")
703
+ assert mgr.as_array().dtype == "object"
704
+ mgr = create_mgr("a: M8[ns]; b: m8[ns]")
705
+ assert mgr.as_array().dtype == "object"
706
+
707
+ def test_consolidate_ordering_issues(self, mgr):
708
+ mgr.iset(mgr.items.get_loc("f"), np.random.randn(N))
709
+ mgr.iset(mgr.items.get_loc("d"), np.random.randn(N))
710
+ mgr.iset(mgr.items.get_loc("b"), np.random.randn(N))
711
+ mgr.iset(mgr.items.get_loc("g"), np.random.randn(N))
712
+ mgr.iset(mgr.items.get_loc("h"), np.random.randn(N))
713
+
714
+ # we have datetime/tz blocks in mgr
715
+ cons = mgr.consolidate()
716
+ assert cons.nblocks == 4
717
+ cons = mgr.consolidate().get_numeric_data()
718
+ assert cons.nblocks == 1
719
+ assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
720
+ tm.assert_numpy_array_equal(
721
+ cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.intp)
722
+ )
723
+
724
+ def test_reindex_items(self):
725
+ # mgr is not consolidated, f8 & f8-2 blocks
726
+ mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2")
727
+
728
+ reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
729
+ # reindex_axis does not consolidate_inplace, as that risks failing to
730
+ # invalidate _item_cache
731
+ assert not reindexed.is_consolidated()
732
+
733
+ tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"]))
734
+ tm.assert_almost_equal(
735
+ mgr.iget(6).internal_values(), reindexed.iget(0).internal_values()
736
+ )
737
+ tm.assert_almost_equal(
738
+ mgr.iget(2).internal_values(), reindexed.iget(1).internal_values()
739
+ )
740
+ tm.assert_almost_equal(
741
+ mgr.iget(0).internal_values(), reindexed.iget(2).internal_values()
742
+ )
743
+ tm.assert_almost_equal(
744
+ mgr.iget(3).internal_values(), reindexed.iget(3).internal_values()
745
+ )
746
+
747
+ def test_get_numeric_data(self, using_copy_on_write):
748
+ mgr = create_mgr(
749
+ "int: int; float: float; complex: complex;"
750
+ "str: object; bool: bool; obj: object; dt: datetime",
751
+ item_shape=(3,),
752
+ )
753
+ mgr.iset(5, np.array([1, 2, 3], dtype=np.object_))
754
+
755
+ numeric = mgr.get_numeric_data()
756
+ tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
757
+ tm.assert_almost_equal(
758
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
759
+ numeric.iget(numeric.items.get_loc("float")).internal_values(),
760
+ )
761
+
762
+ # Check sharing
763
+ numeric.iset(
764
+ numeric.items.get_loc("float"),
765
+ np.array([100.0, 200.0, 300.0]),
766
+ inplace=True,
767
+ )
768
+ if using_copy_on_write:
769
+ tm.assert_almost_equal(
770
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
771
+ np.array([1.0, 1.0, 1.0]),
772
+ )
773
+ else:
774
+ tm.assert_almost_equal(
775
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
776
+ np.array([100.0, 200.0, 300.0]),
777
+ )
778
+
779
+ numeric2 = mgr.get_numeric_data(copy=True)
780
+ tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
781
+ numeric2.iset(
782
+ numeric2.items.get_loc("float"),
783
+ np.array([1000.0, 2000.0, 3000.0]),
784
+ inplace=True,
785
+ )
786
+ if using_copy_on_write:
787
+ tm.assert_almost_equal(
788
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
789
+ np.array([1.0, 1.0, 1.0]),
790
+ )
791
+ else:
792
+ tm.assert_almost_equal(
793
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
794
+ np.array([100.0, 200.0, 300.0]),
795
+ )
796
+
797
+ def test_get_bool_data(self, using_copy_on_write):
798
+ mgr = create_mgr(
799
+ "int: int; float: float; complex: complex;"
800
+ "str: object; bool: bool; obj: object; dt: datetime",
801
+ item_shape=(3,),
802
+ )
803
+ mgr.iset(6, np.array([True, False, True], dtype=np.object_))
804
+
805
+ bools = mgr.get_bool_data()
806
+ tm.assert_index_equal(bools.items, Index(["bool"]))
807
+ tm.assert_almost_equal(
808
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
809
+ bools.iget(bools.items.get_loc("bool")).internal_values(),
810
+ )
811
+
812
+ bools.iset(0, np.array([True, False, True]), inplace=True)
813
+ if using_copy_on_write:
814
+ tm.assert_numpy_array_equal(
815
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
816
+ np.array([True, True, True]),
817
+ )
818
+ else:
819
+ tm.assert_numpy_array_equal(
820
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
821
+ np.array([True, False, True]),
822
+ )
823
+
824
+ # Check sharing
825
+ bools2 = mgr.get_bool_data(copy=True)
826
+ bools2.iset(0, np.array([False, True, False]))
827
+ if using_copy_on_write:
828
+ tm.assert_numpy_array_equal(
829
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
830
+ np.array([True, True, True]),
831
+ )
832
+ else:
833
+ tm.assert_numpy_array_equal(
834
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
835
+ np.array([True, False, True]),
836
+ )
837
+
838
+ def test_unicode_repr_doesnt_raise(self):
839
+ repr(create_mgr("b,\u05d0: object"))
840
+
841
+ @pytest.mark.parametrize(
842
+ "mgr_string", ["a,b,c: i8-1; d,e,f: i8-2", "a,a,a: i8-1; b,b,b: i8-2"]
843
+ )
844
+ def test_equals(self, mgr_string):
845
+ # unique items
846
+ bm1 = create_mgr(mgr_string)
847
+ bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
848
+ assert bm1.equals(bm2)
849
+
850
+ @pytest.mark.parametrize(
851
+ "mgr_string",
852
+ [
853
+ "a:i8;b:f8", # basic case
854
+ "a:i8;b:f8;c:c8;d:b", # many types
855
+ "a:i8;e:dt;f:td;g:string", # more types
856
+ "a:i8;b:category;c:category2", # categories
857
+ "c:sparse;d:sparse_na;b:f8", # sparse
858
+ ],
859
+ )
860
+ def test_equals_block_order_different_dtypes(self, mgr_string):
861
+ # GH 9330
862
+ bm = create_mgr(mgr_string)
863
+ block_perms = itertools.permutations(bm.blocks)
864
+ for bm_perm in block_perms:
865
+ bm_this = BlockManager(tuple(bm_perm), bm.axes)
866
+ assert bm.equals(bm_this)
867
+ assert bm_this.equals(bm)
868
+
869
+ def test_single_mgr_ctor(self):
870
+ mgr = create_single_mgr("f8", num_rows=5)
871
+ assert mgr.external_values().tolist() == [0.0, 1.0, 2.0, 3.0, 4.0]
872
+
873
+ @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
874
+ def test_validate_bool_args(self, value):
875
+ bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2")
876
+
877
+ msg = (
878
+ 'For argument "inplace" expected type bool, '
879
+ f"received type {type(value).__name__}."
880
+ )
881
+ with pytest.raises(ValueError, match=msg):
882
+ bm1.replace_list([1], [2], inplace=value)
883
+
884
+ def test_iset_split_block(self):
885
+ bm = create_mgr("a,b,c: i8; d: f8")
886
+ bm._iset_split_block(0, np.array([0]))
887
+ tm.assert_numpy_array_equal(
888
+ bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
889
+ )
890
+ # First indexer currently does not have a block associated with it in case
891
+ tm.assert_numpy_array_equal(
892
+ bm.blknos, np.array([0, 0, 0, 1], dtype="int64" if IS64 else "int32")
893
+ )
894
+ assert len(bm.blocks) == 2
895
+
896
+ def test_iset_split_block_values(self):
897
+ bm = create_mgr("a,b,c: i8; d: f8")
898
+ bm._iset_split_block(0, np.array([0]), np.array([list(range(10))]))
899
+ tm.assert_numpy_array_equal(
900
+ bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
901
+ )
902
+ # First indexer currently does not have a block associated with it in case
903
+ tm.assert_numpy_array_equal(
904
+ bm.blknos, np.array([0, 2, 2, 1], dtype="int64" if IS64 else "int32")
905
+ )
906
+ assert len(bm.blocks) == 3
907
+
908
+
909
+ def _as_array(mgr):
910
+ if mgr.ndim == 1:
911
+ return mgr.external_values()
912
+ return mgr.as_array().T
913
+
914
+
915
+ class TestIndexing:
916
+ # Nosetests-style data-driven tests.
917
+ #
918
+ # This test applies different indexing routines to block managers and
919
+ # compares the outcome to the result of same operations on np.ndarray.
920
+ #
921
+ # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
922
+ # and are disabled.
923
+
924
+ MANAGERS = [
925
+ create_single_mgr("f8", N),
926
+ create_single_mgr("i8", N),
927
+ # 2-dim
928
+ create_mgr("a,b,c,d,e,f: f8", item_shape=(N,)),
929
+ create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)),
930
+ create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)),
931
+ create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)),
932
+ ]
933
+
934
+ @pytest.mark.parametrize("mgr", MANAGERS)
935
+ def test_get_slice(self, mgr):
936
+ def assert_slice_ok(mgr, axis, slobj):
937
+ mat = _as_array(mgr)
938
+
939
+ # we maybe using an ndarray to test slicing and
940
+ # might not be the full length of the axis
941
+ if isinstance(slobj, np.ndarray):
942
+ ax = mgr.axes[axis]
943
+ if len(ax) and len(slobj) and len(slobj) != len(ax):
944
+ slobj = np.concatenate(
945
+ [slobj, np.zeros(len(ax) - len(slobj), dtype=bool)]
946
+ )
947
+
948
+ if isinstance(slobj, slice):
949
+ sliced = mgr.get_slice(slobj, axis=axis)
950
+ elif mgr.ndim == 1 and axis == 0:
951
+ sliced = mgr.getitem_mgr(slobj)
952
+ else:
953
+ # BlockManager doesn't support non-slice, SingleBlockManager
954
+ # doesn't support axis > 0
955
+ return
956
+
957
+ mat_slobj = (slice(None),) * axis + (slobj,)
958
+ tm.assert_numpy_array_equal(
959
+ mat[mat_slobj], _as_array(sliced), check_dtype=False
960
+ )
961
+ tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
962
+
963
+ assert mgr.ndim <= 2, mgr.ndim
964
+ for ax in range(mgr.ndim):
965
+ # slice
966
+ assert_slice_ok(mgr, ax, slice(None))
967
+ assert_slice_ok(mgr, ax, slice(3))
968
+ assert_slice_ok(mgr, ax, slice(100))
969
+ assert_slice_ok(mgr, ax, slice(1, 4))
970
+ assert_slice_ok(mgr, ax, slice(3, 0, -2))
971
+
972
+ if mgr.ndim < 2:
973
+ # 2D only support slice objects
974
+
975
+ # boolean mask
976
+ assert_slice_ok(mgr, ax, np.array([], dtype=np.bool_))
977
+ assert_slice_ok(mgr, ax, np.ones(mgr.shape[ax], dtype=np.bool_))
978
+ assert_slice_ok(mgr, ax, np.zeros(mgr.shape[ax], dtype=np.bool_))
979
+
980
+ if mgr.shape[ax] >= 3:
981
+ assert_slice_ok(mgr, ax, np.arange(mgr.shape[ax]) % 3 == 0)
982
+ assert_slice_ok(
983
+ mgr, ax, np.array([True, True, False], dtype=np.bool_)
984
+ )
985
+
986
+ # fancy indexer
987
+ assert_slice_ok(mgr, ax, [])
988
+ assert_slice_ok(mgr, ax, list(range(mgr.shape[ax])))
989
+
990
+ if mgr.shape[ax] >= 3:
991
+ assert_slice_ok(mgr, ax, [0, 1, 2])
992
+ assert_slice_ok(mgr, ax, [-1, -2, -3])
993
+
994
+ @pytest.mark.parametrize("mgr", MANAGERS)
995
+ def test_take(self, mgr):
996
+ def assert_take_ok(mgr, axis, indexer):
997
+ mat = _as_array(mgr)
998
+ taken = mgr.take(indexer, axis)
999
+ tm.assert_numpy_array_equal(
1000
+ np.take(mat, indexer, axis), _as_array(taken), check_dtype=False
1001
+ )
1002
+ tm.assert_index_equal(mgr.axes[axis].take(indexer), taken.axes[axis])
1003
+
1004
+ for ax in range(mgr.ndim):
1005
+ # take/fancy indexer
1006
+ assert_take_ok(mgr, ax, indexer=[])
1007
+ assert_take_ok(mgr, ax, indexer=[0, 0, 0])
1008
+ assert_take_ok(mgr, ax, indexer=list(range(mgr.shape[ax])))
1009
+
1010
+ if mgr.shape[ax] >= 3:
1011
+ assert_take_ok(mgr, ax, indexer=[0, 1, 2])
1012
+ assert_take_ok(mgr, ax, indexer=[-1, -2, -3])
1013
+
1014
+ @pytest.mark.parametrize("mgr", MANAGERS)
1015
+ @pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
1016
+ def test_reindex_axis(self, fill_value, mgr):
1017
+ def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
1018
+ mat = _as_array(mgr)
1019
+ indexer = mgr.axes[axis].get_indexer_for(new_labels)
1020
+
1021
+ reindexed = mgr.reindex_axis(new_labels, axis, fill_value=fill_value)
1022
+ tm.assert_numpy_array_equal(
1023
+ algos.take_nd(mat, indexer, axis, fill_value=fill_value),
1024
+ _as_array(reindexed),
1025
+ check_dtype=False,
1026
+ )
1027
+ tm.assert_index_equal(reindexed.axes[axis], new_labels)
1028
+
1029
+ for ax in range(mgr.ndim):
1030
+ assert_reindex_axis_is_ok(mgr, ax, Index([]), fill_value)
1031
+ assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax], fill_value)
1032
+ assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][[0, 0, 0]], fill_value)
1033
+ assert_reindex_axis_is_ok(mgr, ax, Index(["foo", "bar", "baz"]), fill_value)
1034
+ assert_reindex_axis_is_ok(
1035
+ mgr, ax, Index(["foo", mgr.axes[ax][0], "baz"]), fill_value
1036
+ )
1037
+
1038
+ if mgr.shape[ax] >= 3:
1039
+ assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][:-3], fill_value)
1040
+ assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][-3::-1], fill_value)
1041
+ assert_reindex_axis_is_ok(
1042
+ mgr, ax, mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value
1043
+ )
1044
+
1045
+ @pytest.mark.parametrize("mgr", MANAGERS)
1046
+ @pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
1047
+ def test_reindex_indexer(self, fill_value, mgr):
1048
+ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value):
1049
+ mat = _as_array(mgr)
1050
+ reindexed_mat = algos.take_nd(mat, indexer, axis, fill_value=fill_value)
1051
+ reindexed = mgr.reindex_indexer(
1052
+ new_labels, indexer, axis, fill_value=fill_value
1053
+ )
1054
+ tm.assert_numpy_array_equal(
1055
+ reindexed_mat, _as_array(reindexed), check_dtype=False
1056
+ )
1057
+ tm.assert_index_equal(reindexed.axes[axis], new_labels)
1058
+
1059
+ for ax in range(mgr.ndim):
1060
+ assert_reindex_indexer_is_ok(
1061
+ mgr, ax, Index([]), np.array([], dtype=np.intp), fill_value
1062
+ )
1063
+ assert_reindex_indexer_is_ok(
1064
+ mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value
1065
+ )
1066
+ assert_reindex_indexer_is_ok(
1067
+ mgr,
1068
+ ax,
1069
+ Index(["foo"] * mgr.shape[ax]),
1070
+ np.arange(mgr.shape[ax]),
1071
+ fill_value,
1072
+ )
1073
+ assert_reindex_indexer_is_ok(
1074
+ mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value
1075
+ )
1076
+ assert_reindex_indexer_is_ok(
1077
+ mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value
1078
+ )
1079
+ assert_reindex_indexer_is_ok(
1080
+ mgr, ax, Index(["foo", "bar", "baz"]), np.array([0, 0, 0]), fill_value
1081
+ )
1082
+ assert_reindex_indexer_is_ok(
1083
+ mgr, ax, Index(["foo", "bar", "baz"]), np.array([-1, 0, -1]), fill_value
1084
+ )
1085
+ assert_reindex_indexer_is_ok(
1086
+ mgr,
1087
+ ax,
1088
+ Index(["foo", mgr.axes[ax][0], "baz"]),
1089
+ np.array([-1, -1, -1]),
1090
+ fill_value,
1091
+ )
1092
+
1093
+ if mgr.shape[ax] >= 3:
1094
+ assert_reindex_indexer_is_ok(
1095
+ mgr,
1096
+ ax,
1097
+ Index(["foo", "bar", "baz"]),
1098
+ np.array([0, 1, 2]),
1099
+ fill_value,
1100
+ )
1101
+
1102
+
1103
+ class TestBlockPlacement:
1104
+ @pytest.mark.parametrize(
1105
+ "slc, expected",
1106
+ [
1107
+ (slice(0, 4), 4),
1108
+ (slice(0, 4, 2), 2),
1109
+ (slice(0, 3, 2), 2),
1110
+ (slice(0, 1, 2), 1),
1111
+ (slice(1, 0, -1), 1),
1112
+ ],
1113
+ )
1114
+ def test_slice_len(self, slc, expected):
1115
+ assert len(BlockPlacement(slc)) == expected
1116
+
1117
+ @pytest.mark.parametrize("slc", [slice(1, 1, 0), slice(1, 2, 0)])
1118
+ def test_zero_step_raises(self, slc):
1119
+ msg = "slice step cannot be zero"
1120
+ with pytest.raises(ValueError, match=msg):
1121
+ BlockPlacement(slc)
1122
+
1123
+ def test_slice_canonize_negative_stop(self):
1124
+ # GH#37524 negative stop is OK with negative step and positive start
1125
+ slc = slice(3, -1, -2)
1126
+
1127
+ bp = BlockPlacement(slc)
1128
+ assert bp.indexer == slice(3, None, -2)
1129
+
1130
+ @pytest.mark.parametrize(
1131
+ "slc",
1132
+ [
1133
+ slice(None, None),
1134
+ slice(10, None),
1135
+ slice(None, None, -1),
1136
+ slice(None, 10, -1),
1137
+ # These are "unbounded" because negative index will
1138
+ # change depending on container shape.
1139
+ slice(-1, None),
1140
+ slice(None, -1),
1141
+ slice(-1, -1),
1142
+ slice(-1, None, -1),
1143
+ slice(None, -1, -1),
1144
+ slice(-1, -1, -1),
1145
+ ],
1146
+ )
1147
+ def test_unbounded_slice_raises(self, slc):
1148
+ msg = "unbounded slice"
1149
+ with pytest.raises(ValueError, match=msg):
1150
+ BlockPlacement(slc)
1151
+
1152
+ @pytest.mark.parametrize(
1153
+ "slc",
1154
+ [
1155
+ slice(0, 0),
1156
+ slice(100, 0),
1157
+ slice(100, 100),
1158
+ slice(100, 100, -1),
1159
+ slice(0, 100, -1),
1160
+ ],
1161
+ )
1162
+ def test_not_slice_like_slices(self, slc):
1163
+ assert not BlockPlacement(slc).is_slice_like
1164
+
1165
+ @pytest.mark.parametrize(
1166
+ "arr, slc",
1167
+ [
1168
+ ([0], slice(0, 1, 1)),
1169
+ ([100], slice(100, 101, 1)),
1170
+ ([0, 1, 2], slice(0, 3, 1)),
1171
+ ([0, 5, 10], slice(0, 15, 5)),
1172
+ ([0, 100], slice(0, 200, 100)),
1173
+ ([2, 1], slice(2, 0, -1)),
1174
+ ],
1175
+ )
1176
+ def test_array_to_slice_conversion(self, arr, slc):
1177
+ assert BlockPlacement(arr).as_slice == slc
1178
+
1179
+ @pytest.mark.parametrize(
1180
+ "arr",
1181
+ [
1182
+ [],
1183
+ [-1],
1184
+ [-1, -2, -3],
1185
+ [-10],
1186
+ [-1],
1187
+ [-1, 0, 1, 2],
1188
+ [-2, 0, 2, 4],
1189
+ [1, 0, -1],
1190
+ [1, 1, 1],
1191
+ ],
1192
+ )
1193
+ def test_not_slice_like_arrays(self, arr):
1194
+ assert not BlockPlacement(arr).is_slice_like
1195
+
1196
+ @pytest.mark.parametrize(
1197
+ "slc, expected",
1198
+ [(slice(0, 3), [0, 1, 2]), (slice(0, 0), []), (slice(3, 0), [])],
1199
+ )
1200
+ def test_slice_iter(self, slc, expected):
1201
+ assert list(BlockPlacement(slc)) == expected
1202
+
1203
+ @pytest.mark.parametrize(
1204
+ "slc, arr",
1205
+ [
1206
+ (slice(0, 3), [0, 1, 2]),
1207
+ (slice(0, 0), []),
1208
+ (slice(3, 0), []),
1209
+ (slice(3, 0, -1), [3, 2, 1]),
1210
+ ],
1211
+ )
1212
+ def test_slice_to_array_conversion(self, slc, arr):
1213
+ tm.assert_numpy_array_equal(
1214
+ BlockPlacement(slc).as_array, np.asarray(arr, dtype=np.intp)
1215
+ )
1216
+
1217
+ def test_blockplacement_add(self):
1218
+ bpl = BlockPlacement(slice(0, 5))
1219
+ assert bpl.add(1).as_slice == slice(1, 6, 1)
1220
+ assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
1221
+ assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
1222
+
1223
+ @pytest.mark.parametrize(
1224
+ "val, inc, expected",
1225
+ [
1226
+ (slice(0, 0), 0, []),
1227
+ (slice(1, 4), 0, [1, 2, 3]),
1228
+ (slice(3, 0, -1), 0, [3, 2, 1]),
1229
+ ([1, 2, 4], 0, [1, 2, 4]),
1230
+ (slice(0, 0), 10, []),
1231
+ (slice(1, 4), 10, [11, 12, 13]),
1232
+ (slice(3, 0, -1), 10, [13, 12, 11]),
1233
+ ([1, 2, 4], 10, [11, 12, 14]),
1234
+ (slice(0, 0), -1, []),
1235
+ (slice(1, 4), -1, [0, 1, 2]),
1236
+ ([1, 2, 4], -1, [0, 1, 3]),
1237
+ ],
1238
+ )
1239
+ def test_blockplacement_add_int(self, val, inc, expected):
1240
+ assert list(BlockPlacement(val).add(inc)) == expected
1241
+
1242
+ @pytest.mark.parametrize("val", [slice(1, 4), [1, 2, 4]])
1243
+ def test_blockplacement_add_int_raises(self, val):
1244
+ msg = "iadd causes length change"
1245
+ with pytest.raises(ValueError, match=msg):
1246
+ BlockPlacement(val).add(-10)
1247
+
1248
+
1249
+ class TestCanHoldElement:
1250
+ @pytest.fixture(
1251
+ params=[
1252
+ lambda x: x,
1253
+ lambda x: x.to_series(),
1254
+ lambda x: x._data,
1255
+ lambda x: list(x),
1256
+ lambda x: x.astype(object),
1257
+ lambda x: np.asarray(x),
1258
+ lambda x: x[0],
1259
+ lambda x: x[:0],
1260
+ ]
1261
+ )
1262
+ def element(self, request):
1263
+ """
1264
+ Functions that take an Index and return an element that should have
1265
+ blk._can_hold_element(element) for a Block with this index's dtype.
1266
+ """
1267
+ return request.param
1268
+
1269
+ def test_datetime_block_can_hold_element(self):
1270
+ block = create_block("datetime", [0])
1271
+
1272
+ assert block._can_hold_element([])
1273
+
1274
+ # We will check that block._can_hold_element iff arr.__setitem__ works
1275
+ arr = pd.array(block.values.ravel())
1276
+
1277
+ # coerce None
1278
+ assert block._can_hold_element(None)
1279
+ arr[0] = None
1280
+ assert arr[0] is pd.NaT
1281
+
1282
+ # coerce different types of datetime objects
1283
+ vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)]
1284
+ for val in vals:
1285
+ assert block._can_hold_element(val)
1286
+ arr[0] = val
1287
+
1288
+ val = date(2010, 10, 10)
1289
+ assert not block._can_hold_element(val)
1290
+
1291
+ msg = (
1292
+ "value should be a 'Timestamp', 'NaT', "
1293
+ "or array of those. Got 'date' instead."
1294
+ )
1295
+ with pytest.raises(TypeError, match=msg):
1296
+ arr[0] = val
1297
+
1298
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
1299
+ def test_interval_can_hold_element_emptylist(self, dtype, element):
1300
+ arr = np.array([1, 3, 4], dtype=dtype)
1301
+ ii = IntervalIndex.from_breaks(arr)
1302
+ blk = new_block(ii._data, [1], ndim=2)
1303
+
1304
+ assert blk._can_hold_element([])
1305
+ # TODO: check this holds for all blocks
1306
+
1307
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
1308
+ def test_interval_can_hold_element(self, dtype, element):
1309
+ arr = np.array([1, 3, 4, 9], dtype=dtype)
1310
+ ii = IntervalIndex.from_breaks(arr)
1311
+ blk = new_block(ii._data, [1], ndim=2)
1312
+
1313
+ elem = element(ii)
1314
+ self.check_series_setitem(elem, ii, True)
1315
+ assert blk._can_hold_element(elem)
1316
+
1317
+ # Careful: to get the expected Series-inplace behavior we need
1318
+ # `elem` to not have the same length as `arr`
1319
+ ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither")
1320
+ elem = element(ii2)
1321
+ self.check_series_setitem(elem, ii, False)
1322
+ assert not blk._can_hold_element(elem)
1323
+
1324
+ ii3 = IntervalIndex.from_breaks([Timestamp(1), Timestamp(3), Timestamp(4)])
1325
+ elem = element(ii3)
1326
+ self.check_series_setitem(elem, ii, False)
1327
+ assert not blk._can_hold_element(elem)
1328
+
1329
+ ii4 = IntervalIndex.from_breaks([Timedelta(1), Timedelta(3), Timedelta(4)])
1330
+ elem = element(ii4)
1331
+ self.check_series_setitem(elem, ii, False)
1332
+ assert not blk._can_hold_element(elem)
1333
+
1334
+ def test_period_can_hold_element_emptylist(self):
1335
+ pi = period_range("2016", periods=3, freq="A")
1336
+ blk = new_block(pi._data.reshape(1, 3), [1], ndim=2)
1337
+
1338
+ assert blk._can_hold_element([])
1339
+
1340
+ def test_period_can_hold_element(self, element):
1341
+ pi = period_range("2016", periods=3, freq="A")
1342
+
1343
+ elem = element(pi)
1344
+ self.check_series_setitem(elem, pi, True)
1345
+
1346
+ # Careful: to get the expected Series-inplace behavior we need
1347
+ # `elem` to not have the same length as `arr`
1348
+ pi2 = pi.asfreq("D")[:-1]
1349
+ elem = element(pi2)
1350
+ self.check_series_setitem(elem, pi, False)
1351
+
1352
+ dti = pi.to_timestamp("S")[:-1]
1353
+ elem = element(dti)
1354
+ self.check_series_setitem(elem, pi, False)
1355
+
1356
+ def check_can_hold_element(self, obj, elem, inplace: bool):
1357
+ blk = obj._mgr.blocks[0]
1358
+ if inplace:
1359
+ assert blk._can_hold_element(elem)
1360
+ else:
1361
+ assert not blk._can_hold_element(elem)
1362
+
1363
+ def check_series_setitem(self, elem, index: Index, inplace: bool):
1364
+ arr = index._data.copy()
1365
+ ser = Series(arr, copy=False)
1366
+
1367
+ self.check_can_hold_element(ser, elem, inplace)
1368
+
1369
+ if is_scalar(elem):
1370
+ ser[0] = elem
1371
+ else:
1372
+ ser[: len(elem)] = elem
1373
+
1374
+ if inplace:
1375
+ assert ser.array is arr # i.e. setting was done inplace
1376
+ else:
1377
+ assert ser.dtype == object
1378
+
1379
+
1380
+ class TestShouldStore:
1381
+ def test_should_store_categorical(self):
1382
+ cat = Categorical(["A", "B", "C"])
1383
+ df = DataFrame(cat)
1384
+ blk = df._mgr.blocks[0]
1385
+
1386
+ # matching dtype
1387
+ assert blk.should_store(cat)
1388
+ assert blk.should_store(cat[:-1])
1389
+
1390
+ # different dtype
1391
+ assert not blk.should_store(cat.as_ordered())
1392
+
1393
+ # ndarray instead of Categorical
1394
+ assert not blk.should_store(np.asarray(cat))
1395
+
1396
+
1397
+ def test_validate_ndim(block_maker):
1398
+ values = np.array([1.0, 2.0])
1399
+ placement = slice(2)
1400
+ msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
1401
+
1402
+ with pytest.raises(ValueError, match=msg):
1403
+ block_maker(values, placement, ndim=2)
1404
+
1405
+
1406
+ def test_block_shape():
1407
+ idx = Index([0, 1, 2, 3, 4])
1408
+ a = Series([1, 2, 3]).reindex(idx)
1409
+ b = Series(Categorical([1, 2, 3])).reindex(idx)
1410
+
1411
+ assert a._mgr.blocks[0].mgr_locs.indexer == b._mgr.blocks[0].mgr_locs.indexer
1412
+
1413
+
1414
+ def test_make_block_no_pandas_array(block_maker):
1415
+ # https://github.com/pandas-dev/pandas/pull/24866
1416
+ arr = pd.arrays.PandasArray(np.array([1, 2]))
1417
+
1418
+ # PandasArray, no dtype
1419
+ result = block_maker(arr, slice(len(arr)), ndim=arr.ndim)
1420
+ assert result.dtype.kind in ["i", "u"]
1421
+
1422
+ if block_maker is make_block:
1423
+ # new_block requires caller to unwrap PandasArray
1424
+ assert result.is_extension is False
1425
+
1426
+ # PandasArray, PandasDtype
1427
+ result = block_maker(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim)
1428
+ assert result.dtype.kind in ["i", "u"]
1429
+ assert result.is_extension is False
1430
+
1431
+ # new_block no longer taked dtype keyword
1432
+ # ndarray, PandasDtype
1433
+ result = block_maker(
1434
+ arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim
1435
+ )
1436
+ assert result.dtype.kind in ["i", "u"]
1437
+ assert result.is_extension is False
videochat2/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Testing interaction between the different managers (BlockManager, ArrayManager)
3
+ """
4
+ from pandas.core.dtypes.missing import array_equivalent
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+ from pandas.core.internals import (
9
+ ArrayManager,
10
+ BlockManager,
11
+ SingleArrayManager,
12
+ SingleBlockManager,
13
+ )
14
+
15
+
16
+ def test_dataframe_creation():
17
+ with pd.option_context("mode.data_manager", "block"):
18
+ df_block = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
19
+ assert isinstance(df_block._mgr, BlockManager)
20
+
21
+ with pd.option_context("mode.data_manager", "array"):
22
+ df_array = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
23
+ assert isinstance(df_array._mgr, ArrayManager)
24
+
25
+ # also ensure both are seen as equal
26
+ tm.assert_frame_equal(df_block, df_array)
27
+
28
+ # conversion from one manager to the other
29
+ result = df_block._as_manager("block")
30
+ assert isinstance(result._mgr, BlockManager)
31
+ result = df_block._as_manager("array")
32
+ assert isinstance(result._mgr, ArrayManager)
33
+ tm.assert_frame_equal(result, df_block)
34
+ assert all(
35
+ array_equivalent(left, right)
36
+ for left, right in zip(result._mgr.arrays, df_array._mgr.arrays)
37
+ )
38
+
39
+ result = df_array._as_manager("array")
40
+ assert isinstance(result._mgr, ArrayManager)
41
+ result = df_array._as_manager("block")
42
+ assert isinstance(result._mgr, BlockManager)
43
+ tm.assert_frame_equal(result, df_array)
44
+ assert len(result._mgr.blocks) == 2
45
+
46
+
47
+ def test_series_creation():
48
+ with pd.option_context("mode.data_manager", "block"):
49
+ s_block = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"])
50
+ assert isinstance(s_block._mgr, SingleBlockManager)
51
+
52
+ with pd.option_context("mode.data_manager", "array"):
53
+ s_array = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"])
54
+ assert isinstance(s_array._mgr, SingleArrayManager)
55
+
56
+ # also ensure both are seen as equal
57
+ tm.assert_series_equal(s_block, s_array)
58
+
59
+ # conversion from one manager to the other
60
+ result = s_block._as_manager("block")
61
+ assert isinstance(result._mgr, SingleBlockManager)
62
+ result = s_block._as_manager("array")
63
+ assert isinstance(result._mgr, SingleArrayManager)
64
+ tm.assert_series_equal(result, s_block)
65
+
66
+ result = s_array._as_manager("array")
67
+ assert isinstance(result._mgr, SingleArrayManager)
68
+ result = s_array._as_manager("block")
69
+ assert isinstance(result._mgr, SingleBlockManager)
70
+ tm.assert_series_equal(result, s_array)
videochat2/lib/python3.10/site-packages/pandas/tests/io/data/parquet/simple.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe3c5e35a94667ab6da6076fa6db0b57ce991047108d000518d4a8251f9e5f79
3
+ size 2157
videochat2/lib/python3.10/site-packages/pandas/tests/io/data/pickle/test_mi_py27.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a459bfcc43aebb69e8bf9a7e18a3a4e1ad926cb73b8ce9dba68b53dc81109bd
3
+ size 1395
videochat2/lib/python3.10/site-packages/pandas/tests/io/data/pickle/test_py27.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a4d456262c5e3c687b5cf1f6656cd72d0844f3b0dbe8f00a63c48084718584b
3
+ size 943
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (405 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (7.31 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc ADDED
Binary file (23.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc ADDED
Binary file (7.49 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc ADDED
Binary file (14.9 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc ADDED
Binary file (8.64 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc ADDED
Binary file (2.43 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc ADDED
Binary file (5.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc ADDED
Binary file (4.43 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+
4
+ @pytest.fixture(params=[True, False])
5
+ def sort(request):
6
+ """Boolean sort keyword for concat and DataFrame.append."""
7
+ return request.param
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime as dt
2
+ from itertools import combinations
3
+
4
+ import dateutil
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Index,
12
+ Series,
13
+ Timestamp,
14
+ concat,
15
+ isna,
16
+ )
17
+ import pandas._testing as tm
18
+
19
+
20
+ class TestAppend:
21
+ def test_append(self, sort, float_frame):
22
+ mixed_frame = float_frame.copy()
23
+ mixed_frame["foo"] = "bar"
24
+
25
+ begin_index = float_frame.index[:5]
26
+ end_index = float_frame.index[5:]
27
+
28
+ begin_frame = float_frame.reindex(begin_index)
29
+ end_frame = float_frame.reindex(end_index)
30
+
31
+ appended = begin_frame._append(end_frame)
32
+ tm.assert_almost_equal(appended["A"], float_frame["A"])
33
+
34
+ del end_frame["A"]
35
+ partial_appended = begin_frame._append(end_frame, sort=sort)
36
+ assert "A" in partial_appended
37
+
38
+ partial_appended = end_frame._append(begin_frame, sort=sort)
39
+ assert "A" in partial_appended
40
+
41
+ # mixed type handling
42
+ appended = mixed_frame[:5]._append(mixed_frame[5:])
43
+ tm.assert_frame_equal(appended, mixed_frame)
44
+
45
+ # what to test here
46
+ mixed_appended = mixed_frame[:5]._append(float_frame[5:], sort=sort)
47
+ mixed_appended2 = float_frame[:5]._append(mixed_frame[5:], sort=sort)
48
+
49
+ # all equal except 'foo' column
50
+ tm.assert_frame_equal(
51
+ mixed_appended.reindex(columns=["A", "B", "C", "D"]),
52
+ mixed_appended2.reindex(columns=["A", "B", "C", "D"]),
53
+ )
54
+
55
+ def test_append_empty(self, float_frame):
56
+ empty = DataFrame()
57
+
58
+ appended = float_frame._append(empty)
59
+ tm.assert_frame_equal(float_frame, appended)
60
+ assert appended is not float_frame
61
+
62
+ appended = empty._append(float_frame)
63
+ tm.assert_frame_equal(float_frame, appended)
64
+ assert appended is not float_frame
65
+
66
+ def test_append_overlap_raises(self, float_frame):
67
+ msg = "Indexes have overlapping values"
68
+ with pytest.raises(ValueError, match=msg):
69
+ float_frame._append(float_frame, verify_integrity=True)
70
+
71
+ def test_append_new_columns(self):
72
+ # see gh-6129: new columns
73
+ df = DataFrame({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
74
+ row = Series([5, 6, 7], index=["a", "b", "c"], name="z")
75
+ expected = DataFrame(
76
+ {
77
+ "a": {"x": 1, "y": 2, "z": 5},
78
+ "b": {"x": 3, "y": 4, "z": 6},
79
+ "c": {"z": 7},
80
+ }
81
+ )
82
+ result = df._append(row)
83
+ tm.assert_frame_equal(result, expected)
84
+
85
+ def test_append_length0_frame(self, sort):
86
+ df = DataFrame(columns=["A", "B", "C"])
87
+ df3 = DataFrame(index=[0, 1], columns=["A", "B"])
88
+ df5 = df._append(df3, sort=sort)
89
+
90
+ expected = DataFrame(index=[0, 1], columns=["A", "B", "C"])
91
+ tm.assert_frame_equal(df5, expected)
92
+
93
+ def test_append_records(self):
94
+ arr1 = np.zeros((2,), dtype=("i4,f4,a10"))
95
+ arr1[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
96
+
97
+ arr2 = np.zeros((3,), dtype=("i4,f4,a10"))
98
+ arr2[:] = [(3, 4.0, "foo"), (5, 6.0, "bar"), (7.0, 8.0, "baz")]
99
+
100
+ df1 = DataFrame(arr1)
101
+ df2 = DataFrame(arr2)
102
+
103
+ result = df1._append(df2, ignore_index=True)
104
+ expected = DataFrame(np.concatenate((arr1, arr2)))
105
+ tm.assert_frame_equal(result, expected)
106
+
107
+ # rewrite sort fixture, since we also want to test default of None
108
+ def test_append_sorts(self, sort):
109
+ df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
110
+ df2 = DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3])
111
+
112
+ result = df1._append(df2, sort=sort)
113
+
114
+ # for None / True
115
+ expected = DataFrame(
116
+ {"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]},
117
+ columns=["a", "b", "c"],
118
+ )
119
+ if sort is False:
120
+ expected = expected[["b", "a", "c"]]
121
+ tm.assert_frame_equal(result, expected)
122
+
123
+ def test_append_different_columns(self, sort):
124
+ df = DataFrame(
125
+ {
126
+ "bools": np.random.randn(10) > 0,
127
+ "ints": np.random.randint(0, 10, 10),
128
+ "floats": np.random.randn(10),
129
+ "strings": ["foo", "bar"] * 5,
130
+ }
131
+ )
132
+
133
+ a = df[:5].loc[:, ["bools", "ints", "floats"]]
134
+ b = df[5:].loc[:, ["strings", "ints", "floats"]]
135
+
136
+ appended = a._append(b, sort=sort)
137
+ assert isna(appended["strings"][0:4]).all()
138
+ assert isna(appended["bools"][5:]).all()
139
+
140
+ def test_append_many(self, sort, float_frame):
141
+ chunks = [
142
+ float_frame[:5],
143
+ float_frame[5:10],
144
+ float_frame[10:15],
145
+ float_frame[15:],
146
+ ]
147
+
148
+ result = chunks[0]._append(chunks[1:])
149
+ tm.assert_frame_equal(result, float_frame)
150
+
151
+ chunks[-1] = chunks[-1].copy()
152
+ chunks[-1]["foo"] = "bar"
153
+ result = chunks[0]._append(chunks[1:], sort=sort)
154
+ tm.assert_frame_equal(result.loc[:, float_frame.columns], float_frame)
155
+ assert (result["foo"][15:] == "bar").all()
156
+ assert result["foo"][:15].isna().all()
157
+
158
+ def test_append_preserve_index_name(self):
159
+ # #980
160
+ df1 = DataFrame(columns=["A", "B", "C"])
161
+ df1 = df1.set_index(["A"])
162
+ df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["A", "B", "C"])
163
+ df2 = df2.set_index(["A"])
164
+
165
+ result = df1._append(df2)
166
+ assert result.index.name == "A"
167
+
168
+ indexes_can_append = [
169
+ pd.RangeIndex(3),
170
+ Index([4, 5, 6]),
171
+ Index([4.5, 5.5, 6.5]),
172
+ Index(list("abc")),
173
+ pd.CategoricalIndex("A B C".split()),
174
+ pd.CategoricalIndex("D E F".split(), ordered=True),
175
+ pd.IntervalIndex.from_breaks([7, 8, 9, 10]),
176
+ pd.DatetimeIndex(
177
+ [
178
+ dt.datetime(2013, 1, 3, 0, 0),
179
+ dt.datetime(2013, 1, 3, 6, 10),
180
+ dt.datetime(2013, 1, 3, 7, 12),
181
+ ]
182
+ ),
183
+ pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]),
184
+ ]
185
+
186
+ @pytest.mark.parametrize(
187
+ "index", indexes_can_append, ids=lambda x: type(x).__name__
188
+ )
189
+ def test_append_same_columns_type(self, index):
190
+ # GH18359
191
+
192
+ # df wider than ser
193
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)
194
+ ser_index = index[:2]
195
+ ser = Series([7, 8], index=ser_index, name=2)
196
+ result = df._append(ser)
197
+ expected = DataFrame(
198
+ [[1, 2, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index
199
+ )
200
+ # integer dtype is preserved for columns present in ser.index
201
+ assert expected.dtypes.iloc[0].kind == "i"
202
+ assert expected.dtypes.iloc[1].kind == "i"
203
+
204
+ tm.assert_frame_equal(result, expected)
205
+
206
+ # ser wider than df
207
+ ser_index = index
208
+ index = index[:2]
209
+ df = DataFrame([[1, 2], [4, 5]], columns=index)
210
+ ser = Series([7, 8, 9], index=ser_index, name=2)
211
+ result = df._append(ser)
212
+ expected = DataFrame(
213
+ [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]],
214
+ index=[0, 1, 2],
215
+ columns=ser_index,
216
+ )
217
+ tm.assert_frame_equal(result, expected)
218
+
219
+ @pytest.mark.parametrize(
220
+ "df_columns, series_index",
221
+ combinations(indexes_can_append, r=2),
222
+ ids=lambda x: type(x).__name__,
223
+ )
224
+ def test_append_different_columns_types(self, df_columns, series_index):
225
+ # GH18359
226
+ # See also test 'test_append_different_columns_types_raises' below
227
+ # for errors raised when appending
228
+
229
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns)
230
+ ser = Series([7, 8, 9], index=series_index, name=2)
231
+
232
+ result = df._append(ser)
233
+ idx_diff = ser.index.difference(df_columns)
234
+ combined_columns = Index(df_columns.tolist()).append(idx_diff)
235
+ expected = DataFrame(
236
+ [
237
+ [1.0, 2.0, 3.0, np.nan, np.nan, np.nan],
238
+ [4, 5, 6, np.nan, np.nan, np.nan],
239
+ [np.nan, np.nan, np.nan, 7, 8, 9],
240
+ ],
241
+ index=[0, 1, 2],
242
+ columns=combined_columns,
243
+ )
244
+ tm.assert_frame_equal(result, expected)
245
+
246
+ def test_append_dtype_coerce(self, sort):
247
+ # GH 4993
248
+ # appending with datetime will incorrectly convert datetime64
249
+
250
+ df1 = DataFrame(
251
+ index=[1, 2],
252
+ data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)],
253
+ columns=["start_time"],
254
+ )
255
+ df2 = DataFrame(
256
+ index=[4, 5],
257
+ data=[
258
+ [dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)],
259
+ [dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)],
260
+ ],
261
+ columns=["start_time", "end_time"],
262
+ )
263
+
264
+ expected = concat(
265
+ [
266
+ Series(
267
+ [
268
+ pd.NaT,
269
+ pd.NaT,
270
+ dt.datetime(2013, 1, 3, 6, 10),
271
+ dt.datetime(2013, 1, 4, 7, 10),
272
+ ],
273
+ name="end_time",
274
+ ),
275
+ Series(
276
+ [
277
+ dt.datetime(2013, 1, 1, 0, 0),
278
+ dt.datetime(2013, 1, 2, 0, 0),
279
+ dt.datetime(2013, 1, 3, 0, 0),
280
+ dt.datetime(2013, 1, 4, 0, 0),
281
+ ],
282
+ name="start_time",
283
+ ),
284
+ ],
285
+ axis=1,
286
+ sort=sort,
287
+ )
288
+ result = df1._append(df2, ignore_index=True, sort=sort)
289
+ if sort:
290
+ expected = expected[["end_time", "start_time"]]
291
+ else:
292
+ expected = expected[["start_time", "end_time"]]
293
+
294
+ tm.assert_frame_equal(result, expected)
295
+
296
+ def test_append_missing_column_proper_upcast(self, sort):
297
+ df1 = DataFrame({"A": np.array([1, 2, 3, 4], dtype="i8")})
298
+ df2 = DataFrame({"B": np.array([True, False, True, False], dtype=bool)})
299
+
300
+ appended = df1._append(df2, ignore_index=True, sort=sort)
301
+ assert appended["A"].dtype == "f8"
302
+ assert appended["B"].dtype == "O"
303
+
304
+ def test_append_empty_frame_to_series_with_dateutil_tz(self):
305
+ # GH 23682
306
+ date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc())
307
+ ser = Series({"a": 1.0, "b": 2.0, "date": date})
308
+ df = DataFrame(columns=["c", "d"])
309
+ result_a = df._append(ser, ignore_index=True)
310
+ expected = DataFrame(
311
+ [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"]
312
+ )
313
+ # These columns get cast to object after append
314
+ expected["c"] = expected["c"].astype(object)
315
+ expected["d"] = expected["d"].astype(object)
316
+ tm.assert_frame_equal(result_a, expected)
317
+
318
+ expected = DataFrame(
319
+ [[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"]
320
+ )
321
+ expected["c"] = expected["c"].astype(object)
322
+ expected["d"] = expected["d"].astype(object)
323
+ result_b = result_a._append(ser, ignore_index=True)
324
+ tm.assert_frame_equal(result_b, expected)
325
+
326
+ result = df._append([ser, ser], ignore_index=True)
327
+ tm.assert_frame_equal(result, expected)
328
+
329
+ def test_append_empty_tz_frame_with_datetime64ns(self):
330
+ # https://github.com/pandas-dev/pandas/issues/35460
331
+ df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
332
+
333
+ # pd.NaT gets inferred as tz-naive, so append result is tz-naive
334
+ result = df._append({"a": pd.NaT}, ignore_index=True)
335
+ expected = DataFrame({"a": [pd.NaT]}).astype(object)
336
+ tm.assert_frame_equal(result, expected)
337
+
338
+ # also test with typed value to append
339
+ df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
340
+ other = Series({"a": pd.NaT}, dtype="datetime64[ns]")
341
+ result = df._append(other, ignore_index=True)
342
+ expected = DataFrame({"a": [pd.NaT]}).astype(object)
343
+ tm.assert_frame_equal(result, expected)
344
+
345
+ # mismatched tz
346
+ other = Series({"a": pd.NaT}, dtype="datetime64[ns, US/Pacific]")
347
+ result = df._append(other, ignore_index=True)
348
+ expected = DataFrame({"a": [pd.NaT]}).astype(object)
349
+ tm.assert_frame_equal(result, expected)
350
+
351
+ @pytest.mark.parametrize(
352
+ "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"]
353
+ )
354
+ @pytest.mark.parametrize("val", [1, "NaT"])
355
+ def test_append_empty_frame_with_timedelta64ns_nat(self, dtype_str, val):
356
+ # https://github.com/pandas-dev/pandas/issues/35460
357
+ df = DataFrame(columns=["a"]).astype(dtype_str)
358
+
359
+ other = DataFrame({"a": [np.timedelta64(val, "ns")]})
360
+ result = df._append(other, ignore_index=True)
361
+
362
+ expected = other.astype(object)
363
+ tm.assert_frame_equal(result, expected)
364
+
365
+ @pytest.mark.parametrize(
366
+ "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"]
367
+ )
368
+ @pytest.mark.parametrize("val", [1, "NaT"])
369
+ def test_append_frame_with_timedelta64ns_nat(self, dtype_str, val):
370
+ # https://github.com/pandas-dev/pandas/issues/35460
371
+ df = DataFrame({"a": pd.array([1], dtype=dtype_str)})
372
+
373
+ other = DataFrame({"a": [np.timedelta64(val, "ns")]})
374
+ result = df._append(other, ignore_index=True)
375
+
376
+ expected = DataFrame({"a": [df.iloc[0, 0], other.iloc[0, 0]]}, dtype=object)
377
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py ADDED
@@ -0,0 +1,749 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ Categorical,
7
+ DataFrame,
8
+ Index,
9
+ Series,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+ dt_data = [
14
+ pd.Timestamp("2011-01-01"),
15
+ pd.Timestamp("2011-01-02"),
16
+ pd.Timestamp("2011-01-03"),
17
+ ]
18
+ tz_data = [
19
+ pd.Timestamp("2011-01-01", tz="US/Eastern"),
20
+ pd.Timestamp("2011-01-02", tz="US/Eastern"),
21
+ pd.Timestamp("2011-01-03", tz="US/Eastern"),
22
+ ]
23
+ td_data = [
24
+ pd.Timedelta("1 days"),
25
+ pd.Timedelta("2 days"),
26
+ pd.Timedelta("3 days"),
27
+ ]
28
+ period_data = [
29
+ pd.Period("2011-01", freq="M"),
30
+ pd.Period("2011-02", freq="M"),
31
+ pd.Period("2011-03", freq="M"),
32
+ ]
33
+ data_dict = {
34
+ "bool": [True, False, True],
35
+ "int64": [1, 2, 3],
36
+ "float64": [1.1, np.nan, 3.3],
37
+ "category": Categorical(["X", "Y", "Z"]),
38
+ "object": ["a", "b", "c"],
39
+ "datetime64[ns]": dt_data,
40
+ "datetime64[ns, US/Eastern]": tz_data,
41
+ "timedelta64[ns]": td_data,
42
+ "period[M]": period_data,
43
+ }
44
+
45
+
46
+ class TestConcatAppendCommon:
47
+ """
48
+ Test common dtype coercion rules between concat and append.
49
+ """
50
+
51
+ @pytest.fixture(params=sorted(data_dict.keys()))
52
+ def item(self, request):
53
+ key = request.param
54
+ return key, data_dict[key]
55
+
56
+ item2 = item
57
+
58
+ def test_dtypes(self, item, index_or_series):
59
+ # to confirm test case covers intended dtypes
60
+ typ, vals = item
61
+ obj = index_or_series(vals)
62
+ if isinstance(obj, Index):
63
+ assert obj.dtype == typ
64
+ elif isinstance(obj, Series):
65
+ if typ.startswith("period"):
66
+ assert obj.dtype == "Period[M]"
67
+ else:
68
+ assert obj.dtype == typ
69
+
70
+ def test_concatlike_same_dtypes(self, item):
71
+ # GH 13660
72
+ typ1, vals1 = item
73
+
74
+ vals2 = vals1
75
+ vals3 = vals1
76
+
77
+ if typ1 == "category":
78
+ exp_data = Categorical(list(vals1) + list(vals2))
79
+ exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
80
+ else:
81
+ exp_data = vals1 + vals2
82
+ exp_data3 = vals1 + vals2 + vals3
83
+
84
+ # ----- Index ----- #
85
+
86
+ # index.append
87
+ res = Index(vals1).append(Index(vals2))
88
+ exp = Index(exp_data)
89
+ tm.assert_index_equal(res, exp)
90
+
91
+ # 3 elements
92
+ res = Index(vals1).append([Index(vals2), Index(vals3)])
93
+ exp = Index(exp_data3)
94
+ tm.assert_index_equal(res, exp)
95
+
96
+ # index.append name mismatch
97
+ i1 = Index(vals1, name="x")
98
+ i2 = Index(vals2, name="y")
99
+ res = i1.append(i2)
100
+ exp = Index(exp_data)
101
+ tm.assert_index_equal(res, exp)
102
+
103
+ # index.append name match
104
+ i1 = Index(vals1, name="x")
105
+ i2 = Index(vals2, name="x")
106
+ res = i1.append(i2)
107
+ exp = Index(exp_data, name="x")
108
+ tm.assert_index_equal(res, exp)
109
+
110
+ # cannot append non-index
111
+ with pytest.raises(TypeError, match="all inputs must be Index"):
112
+ Index(vals1).append(vals2)
113
+
114
+ with pytest.raises(TypeError, match="all inputs must be Index"):
115
+ Index(vals1).append([Index(vals2), vals3])
116
+
117
+ # ----- Series ----- #
118
+
119
+ # series.append
120
+ res = Series(vals1)._append(Series(vals2), ignore_index=True)
121
+ exp = Series(exp_data)
122
+ tm.assert_series_equal(res, exp, check_index_type=True)
123
+
124
+ # concat
125
+ res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
126
+ tm.assert_series_equal(res, exp, check_index_type=True)
127
+
128
+ # 3 elements
129
+ res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
130
+ exp = Series(exp_data3)
131
+ tm.assert_series_equal(res, exp)
132
+
133
+ res = pd.concat(
134
+ [Series(vals1), Series(vals2), Series(vals3)],
135
+ ignore_index=True,
136
+ )
137
+ tm.assert_series_equal(res, exp)
138
+
139
+ # name mismatch
140
+ s1 = Series(vals1, name="x")
141
+ s2 = Series(vals2, name="y")
142
+ res = s1._append(s2, ignore_index=True)
143
+ exp = Series(exp_data)
144
+ tm.assert_series_equal(res, exp, check_index_type=True)
145
+
146
+ res = pd.concat([s1, s2], ignore_index=True)
147
+ tm.assert_series_equal(res, exp, check_index_type=True)
148
+
149
+ # name match
150
+ s1 = Series(vals1, name="x")
151
+ s2 = Series(vals2, name="x")
152
+ res = s1._append(s2, ignore_index=True)
153
+ exp = Series(exp_data, name="x")
154
+ tm.assert_series_equal(res, exp, check_index_type=True)
155
+
156
+ res = pd.concat([s1, s2], ignore_index=True)
157
+ tm.assert_series_equal(res, exp, check_index_type=True)
158
+
159
+ # cannot append non-index
160
+ msg = (
161
+ r"cannot concatenate object of type '.+'; "
162
+ "only Series and DataFrame objs are valid"
163
+ )
164
+ with pytest.raises(TypeError, match=msg):
165
+ Series(vals1)._append(vals2)
166
+
167
+ with pytest.raises(TypeError, match=msg):
168
+ Series(vals1)._append([Series(vals2), vals3])
169
+
170
+ with pytest.raises(TypeError, match=msg):
171
+ pd.concat([Series(vals1), vals2])
172
+
173
+ with pytest.raises(TypeError, match=msg):
174
+ pd.concat([Series(vals1), Series(vals2), vals3])
175
+
176
+ def test_concatlike_dtypes_coercion(self, item, item2, request):
177
+ # GH 13660
178
+ typ1, vals1 = item
179
+ typ2, vals2 = item2
180
+
181
+ vals3 = vals2
182
+
183
+ # basically infer
184
+ exp_index_dtype = None
185
+ exp_series_dtype = None
186
+
187
+ if typ1 == typ2:
188
+ # same dtype is tested in test_concatlike_same_dtypes
189
+ return
190
+ elif typ1 == "category" or typ2 == "category":
191
+ # The `vals1 + vals2` below fails bc one of these is a Categorical
192
+ # instead of a list; we have separate dedicated tests for categorical
193
+ return
194
+
195
+ # specify expected dtype
196
+ if typ1 == "bool" and typ2 in ("int64", "float64"):
197
+ # series coerces to numeric based on numpy rule
198
+ # index doesn't because bool is object dtype
199
+ exp_series_dtype = typ2
200
+ mark = pytest.mark.xfail(reason="GH#39187 casting to object")
201
+ request.node.add_marker(mark)
202
+ elif typ2 == "bool" and typ1 in ("int64", "float64"):
203
+ exp_series_dtype = typ1
204
+ mark = pytest.mark.xfail(reason="GH#39187 casting to object")
205
+ request.node.add_marker(mark)
206
+ elif (
207
+ typ1 == "datetime64[ns, US/Eastern]"
208
+ or typ2 == "datetime64[ns, US/Eastern]"
209
+ or typ1 == "timedelta64[ns]"
210
+ or typ2 == "timedelta64[ns]"
211
+ ):
212
+ exp_index_dtype = object
213
+ exp_series_dtype = object
214
+
215
+ exp_data = vals1 + vals2
216
+ exp_data3 = vals1 + vals2 + vals3
217
+
218
+ # ----- Index ----- #
219
+
220
+ # index.append
221
+ # GH#39817
222
+ res = Index(vals1).append(Index(vals2))
223
+ exp = Index(exp_data, dtype=exp_index_dtype)
224
+ tm.assert_index_equal(res, exp)
225
+
226
+ # 3 elements
227
+ res = Index(vals1).append([Index(vals2), Index(vals3)])
228
+ exp = Index(exp_data3, dtype=exp_index_dtype)
229
+ tm.assert_index_equal(res, exp)
230
+
231
+ # ----- Series ----- #
232
+
233
+ # series._append
234
+ # GH#39817
235
+ res = Series(vals1)._append(Series(vals2), ignore_index=True)
236
+ exp = Series(exp_data, dtype=exp_series_dtype)
237
+ tm.assert_series_equal(res, exp, check_index_type=True)
238
+
239
+ # concat
240
+ # GH#39817
241
+ res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
242
+ tm.assert_series_equal(res, exp, check_index_type=True)
243
+
244
+ # 3 elements
245
+ # GH#39817
246
+ res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
247
+ exp = Series(exp_data3, dtype=exp_series_dtype)
248
+ tm.assert_series_equal(res, exp)
249
+
250
+ # GH#39817
251
+ res = pd.concat(
252
+ [Series(vals1), Series(vals2), Series(vals3)],
253
+ ignore_index=True,
254
+ )
255
+ tm.assert_series_equal(res, exp)
256
+
257
+ def test_concatlike_common_coerce_to_pandas_object(self):
258
+ # GH 13626
259
+ # result must be Timestamp/Timedelta, not datetime.datetime/timedelta
260
+ dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
261
+ tdi = pd.TimedeltaIndex(["1 days", "2 days"])
262
+
263
+ exp = Index(
264
+ [
265
+ pd.Timestamp("2011-01-01"),
266
+ pd.Timestamp("2011-01-02"),
267
+ pd.Timedelta("1 days"),
268
+ pd.Timedelta("2 days"),
269
+ ]
270
+ )
271
+
272
+ res = dti.append(tdi)
273
+ tm.assert_index_equal(res, exp)
274
+ assert isinstance(res[0], pd.Timestamp)
275
+ assert isinstance(res[-1], pd.Timedelta)
276
+
277
+ dts = Series(dti)
278
+ tds = Series(tdi)
279
+ res = dts._append(tds)
280
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
281
+ assert isinstance(res.iloc[0], pd.Timestamp)
282
+ assert isinstance(res.iloc[-1], pd.Timedelta)
283
+
284
+ res = pd.concat([dts, tds])
285
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
286
+ assert isinstance(res.iloc[0], pd.Timestamp)
287
+ assert isinstance(res.iloc[-1], pd.Timedelta)
288
+
289
+ def test_concatlike_datetimetz(self, tz_aware_fixture):
290
+ tz = tz_aware_fixture
291
+ # GH 7795
292
+ dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
293
+ dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
294
+
295
+ exp = pd.DatetimeIndex(
296
+ ["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
297
+ )
298
+
299
+ res = dti1.append(dti2)
300
+ tm.assert_index_equal(res, exp)
301
+
302
+ dts1 = Series(dti1)
303
+ dts2 = Series(dti2)
304
+ res = dts1._append(dts2)
305
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
306
+
307
+ res = pd.concat([dts1, dts2])
308
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
309
+
310
+ @pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
311
+ def test_concatlike_datetimetz_short(self, tz):
312
+ # GH#7795
313
+ ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
314
+ ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
315
+ df1 = DataFrame(0, index=ix1, columns=["A", "B"])
316
+ df2 = DataFrame(0, index=ix2, columns=["A", "B"])
317
+
318
+ exp_idx = pd.DatetimeIndex(
319
+ ["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
320
+ tz=tz,
321
+ )
322
+ exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
323
+
324
+ tm.assert_frame_equal(df1._append(df2), exp)
325
+ tm.assert_frame_equal(pd.concat([df1, df2]), exp)
326
+
327
+ def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
328
+ tz = tz_aware_fixture
329
+ # GH 13660
330
+
331
+ # different tz coerces to object
332
+ dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
333
+ dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
334
+
335
+ exp = Index(
336
+ [
337
+ pd.Timestamp("2011-01-01", tz=tz),
338
+ pd.Timestamp("2011-01-02", tz=tz),
339
+ pd.Timestamp("2012-01-01"),
340
+ pd.Timestamp("2012-01-02"),
341
+ ],
342
+ dtype=object,
343
+ )
344
+
345
+ res = dti1.append(dti2)
346
+ tm.assert_index_equal(res, exp)
347
+
348
+ dts1 = Series(dti1)
349
+ dts2 = Series(dti2)
350
+ res = dts1._append(dts2)
351
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
352
+
353
+ res = pd.concat([dts1, dts2])
354
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
355
+
356
+ # different tz
357
+ dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
358
+
359
+ exp = Index(
360
+ [
361
+ pd.Timestamp("2011-01-01", tz=tz),
362
+ pd.Timestamp("2011-01-02", tz=tz),
363
+ pd.Timestamp("2012-01-01", tz="US/Pacific"),
364
+ pd.Timestamp("2012-01-02", tz="US/Pacific"),
365
+ ],
366
+ dtype=object,
367
+ )
368
+
369
+ res = dti1.append(dti3)
370
+ tm.assert_index_equal(res, exp)
371
+
372
+ dts1 = Series(dti1)
373
+ dts3 = Series(dti3)
374
+ res = dts1._append(dts3)
375
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
376
+
377
+ res = pd.concat([dts1, dts3])
378
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
379
+
380
+ def test_concatlike_common_period(self):
381
+ # GH 13660
382
+ pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
383
+ pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
384
+
385
+ exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
386
+
387
+ res = pi1.append(pi2)
388
+ tm.assert_index_equal(res, exp)
389
+
390
+ ps1 = Series(pi1)
391
+ ps2 = Series(pi2)
392
+ res = ps1._append(ps2)
393
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
394
+
395
+ res = pd.concat([ps1, ps2])
396
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
397
+
398
+ def test_concatlike_common_period_diff_freq_to_object(self):
399
+ # GH 13221
400
+ pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
401
+ pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
402
+
403
+ exp = Index(
404
+ [
405
+ pd.Period("2011-01", freq="M"),
406
+ pd.Period("2011-02", freq="M"),
407
+ pd.Period("2012-01-01", freq="D"),
408
+ pd.Period("2012-02-01", freq="D"),
409
+ ],
410
+ dtype=object,
411
+ )
412
+
413
+ res = pi1.append(pi2)
414
+ tm.assert_index_equal(res, exp)
415
+
416
+ ps1 = Series(pi1)
417
+ ps2 = Series(pi2)
418
+ res = ps1._append(ps2)
419
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
420
+
421
+ res = pd.concat([ps1, ps2])
422
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
423
+
424
+ def test_concatlike_common_period_mixed_dt_to_object(self):
425
+ # GH 13221
426
+ # different datetimelike
427
+ pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
428
+ tdi = pd.TimedeltaIndex(["1 days", "2 days"])
429
+ exp = Index(
430
+ [
431
+ pd.Period("2011-01", freq="M"),
432
+ pd.Period("2011-02", freq="M"),
433
+ pd.Timedelta("1 days"),
434
+ pd.Timedelta("2 days"),
435
+ ],
436
+ dtype=object,
437
+ )
438
+
439
+ res = pi1.append(tdi)
440
+ tm.assert_index_equal(res, exp)
441
+
442
+ ps1 = Series(pi1)
443
+ tds = Series(tdi)
444
+ res = ps1._append(tds)
445
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
446
+
447
+ res = pd.concat([ps1, tds])
448
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
449
+
450
+ # inverse
451
+ exp = Index(
452
+ [
453
+ pd.Timedelta("1 days"),
454
+ pd.Timedelta("2 days"),
455
+ pd.Period("2011-01", freq="M"),
456
+ pd.Period("2011-02", freq="M"),
457
+ ],
458
+ dtype=object,
459
+ )
460
+
461
+ res = tdi.append(pi1)
462
+ tm.assert_index_equal(res, exp)
463
+
464
+ ps1 = Series(pi1)
465
+ tds = Series(tdi)
466
+ res = tds._append(ps1)
467
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
468
+
469
+ res = pd.concat([tds, ps1])
470
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
471
+
472
+ def test_concat_categorical(self):
473
+ # GH 13524
474
+
475
+ # same categories -> category
476
+ s1 = Series([1, 2, np.nan], dtype="category")
477
+ s2 = Series([2, 1, 2], dtype="category")
478
+
479
+ exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
480
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
481
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
482
+
483
+ # partially different categories => not-category
484
+ s1 = Series([3, 2], dtype="category")
485
+ s2 = Series([2, 1], dtype="category")
486
+
487
+ exp = Series([3, 2, 2, 1])
488
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
489
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
490
+
491
+ # completely different categories (same dtype) => not-category
492
+ s1 = Series([10, 11, np.nan], dtype="category")
493
+ s2 = Series([np.nan, 1, 3, 2], dtype="category")
494
+
495
+ exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
496
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
497
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
498
+
499
+ def test_union_categorical_same_categories_different_order(self):
500
+ # https://github.com/pandas-dev/pandas/issues/19096
501
+ a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
502
+ b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
503
+ result = pd.concat([a, b], ignore_index=True)
504
+ expected = Series(
505
+ Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
506
+ )
507
+ tm.assert_series_equal(result, expected)
508
+
509
+ def test_concat_categorical_coercion(self):
510
+ # GH 13524
511
+
512
+ # category + not-category => not-category
513
+ s1 = Series([1, 2, np.nan], dtype="category")
514
+ s2 = Series([2, 1, 2])
515
+
516
+ exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
517
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
518
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
519
+
520
+ # result shouldn't be affected by 1st elem dtype
521
+ exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
522
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
523
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
524
+
525
+ # all values are not in category => not-category
526
+ s1 = Series([3, 2], dtype="category")
527
+ s2 = Series([2, 1])
528
+
529
+ exp = Series([3, 2, 2, 1])
530
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
531
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
532
+
533
+ exp = Series([2, 1, 3, 2])
534
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
535
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
536
+
537
+ # completely different categories => not-category
538
+ s1 = Series([10, 11, np.nan], dtype="category")
539
+ s2 = Series([1, 3, 2])
540
+
541
+ exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
542
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
543
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
544
+
545
+ exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
546
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
547
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
548
+
549
+ # different dtype => not-category
550
+ s1 = Series([10, 11, np.nan], dtype="category")
551
+ s2 = Series(["a", "b", "c"])
552
+
553
+ exp = Series([10, 11, np.nan, "a", "b", "c"])
554
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
555
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
556
+
557
+ exp = Series(["a", "b", "c", 10, 11, np.nan])
558
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
559
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
560
+
561
+ # if normal series only contains NaN-likes => not-category
562
+ s1 = Series([10, 11], dtype="category")
563
+ s2 = Series([np.nan, np.nan, np.nan])
564
+
565
+ exp = Series([10, 11, np.nan, np.nan, np.nan])
566
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
567
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
568
+
569
+ exp = Series([np.nan, np.nan, np.nan, 10, 11])
570
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
571
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
572
+
573
+ def test_concat_categorical_3elem_coercion(self):
574
+ # GH 13524
575
+
576
+ # mixed dtypes => not-category
577
+ s1 = Series([1, 2, np.nan], dtype="category")
578
+ s2 = Series([2, 1, 2], dtype="category")
579
+ s3 = Series([1, 2, 1, 2, np.nan])
580
+
581
+ exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
582
+ tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
583
+ tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
584
+
585
+ exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
586
+ tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
587
+ tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
588
+
589
+ # values are all in either category => not-category
590
+ s1 = Series([4, 5, 6], dtype="category")
591
+ s2 = Series([1, 2, 3], dtype="category")
592
+ s3 = Series([1, 3, 4])
593
+
594
+ exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
595
+ tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
596
+ tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
597
+
598
+ exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
599
+ tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
600
+ tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
601
+
602
+ # values are all in either category => not-category
603
+ s1 = Series([4, 5, 6], dtype="category")
604
+ s2 = Series([1, 2, 3], dtype="category")
605
+ s3 = Series([10, 11, 12])
606
+
607
+ exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
608
+ tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
609
+ tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
610
+
611
+ exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
612
+ tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
613
+ tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
614
+
615
+ def test_concat_categorical_multi_coercion(self):
616
+ # GH 13524
617
+
618
+ s1 = Series([1, 3], dtype="category")
619
+ s2 = Series([3, 4], dtype="category")
620
+ s3 = Series([2, 3])
621
+ s4 = Series([2, 2], dtype="category")
622
+ s5 = Series([1, np.nan])
623
+ s6 = Series([1, 3, 2], dtype="category")
624
+
625
+ # mixed dtype, values are all in categories => not-category
626
+ exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
627
+ res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
628
+ tm.assert_series_equal(res, exp)
629
+ res = s1._append([s2, s3, s4, s5, s6], ignore_index=True)
630
+ tm.assert_series_equal(res, exp)
631
+
632
+ exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
633
+ res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
634
+ tm.assert_series_equal(res, exp)
635
+ res = s6._append([s5, s4, s3, s2, s1], ignore_index=True)
636
+ tm.assert_series_equal(res, exp)
637
+
638
+ def test_concat_categorical_ordered(self):
639
+ # GH 13524
640
+
641
+ s1 = Series(Categorical([1, 2, np.nan], ordered=True))
642
+ s2 = Series(Categorical([2, 1, 2], ordered=True))
643
+
644
+ exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
645
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
646
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
647
+
648
+ exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True))
649
+ tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
650
+ tm.assert_series_equal(s1._append([s2, s1], ignore_index=True), exp)
651
+
652
+ def test_concat_categorical_coercion_nan(self):
653
+ # GH 13524
654
+
655
+ # some edge cases
656
+ # category + not-category => not category
657
+ s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
658
+ s2 = Series([np.nan, 1])
659
+
660
+ exp = Series([np.nan, np.nan, np.nan, 1])
661
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
662
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
663
+
664
+ s1 = Series([1, np.nan], dtype="category")
665
+ s2 = Series([np.nan, np.nan])
666
+
667
+ exp = Series([1, np.nan, np.nan, np.nan], dtype="float")
668
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
669
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
670
+
671
+ # mixed dtype, all nan-likes => not-category
672
+ s1 = Series([np.nan, np.nan], dtype="category")
673
+ s2 = Series([np.nan, np.nan])
674
+
675
+ exp = Series([np.nan, np.nan, np.nan, np.nan])
676
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
677
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
678
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
679
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
680
+
681
+ # all category nan-likes => category
682
+ s1 = Series([np.nan, np.nan], dtype="category")
683
+ s2 = Series([np.nan, np.nan], dtype="category")
684
+
685
+ exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
686
+
687
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
688
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
689
+
690
+ def test_concat_categorical_empty(self):
691
+ # GH 13524
692
+
693
+ s1 = Series([], dtype="category")
694
+ s2 = Series([1, 2], dtype="category")
695
+
696
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
697
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
698
+
699
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
700
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), s2)
701
+
702
+ s1 = Series([], dtype="category")
703
+ s2 = Series([], dtype="category")
704
+
705
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
706
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
707
+
708
+ s1 = Series([], dtype="category")
709
+ s2 = Series([], dtype="object")
710
+
711
+ # different dtype => not-category
712
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
713
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
714
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
715
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), s2)
716
+
717
+ s1 = Series([], dtype="category")
718
+ s2 = Series([np.nan, np.nan])
719
+
720
+ # empty Series is ignored
721
+ exp = Series([np.nan, np.nan])
722
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
723
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
724
+
725
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
726
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
727
+
728
+ def test_categorical_concat_append(self):
729
+ cat = Categorical(["a", "b"], categories=["a", "b"])
730
+ vals = [1, 2]
731
+ df = DataFrame({"cats": cat, "vals": vals})
732
+ cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
733
+ vals2 = [1, 2, 1, 2]
734
+ exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
735
+
736
+ tm.assert_frame_equal(pd.concat([df, df]), exp)
737
+ tm.assert_frame_equal(df._append(df), exp)
738
+
739
+ # GH 13524 can concat different categories
740
+ cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
741
+ vals3 = [1, 2]
742
+ df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
743
+
744
+ res = pd.concat([df, df_different_categories], ignore_index=True)
745
+ exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
746
+ tm.assert_frame_equal(res, exp)
747
+
748
+ res = df._append(df_different_categories, ignore_index=True)
749
+ tm.assert_frame_equal(res, exp)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas.core.dtypes.dtypes import CategoricalDtype
4
+
5
+ import pandas as pd
6
+ from pandas import (
7
+ Categorical,
8
+ DataFrame,
9
+ Series,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
+ class TestCategoricalConcat:
15
+ def test_categorical_concat(self, sort):
16
+ # See GH 10177
17
+ df1 = DataFrame(
18
+ np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
19
+ )
20
+
21
+ df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
22
+
23
+ cat_values = ["one", "one", "two", "one", "two", "two", "one"]
24
+ df2["h"] = Series(Categorical(cat_values))
25
+
26
+ res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
27
+ exp = DataFrame(
28
+ {
29
+ "a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
30
+ "b": [
31
+ 1,
32
+ 4,
33
+ 7,
34
+ 10,
35
+ 13,
36
+ 16,
37
+ np.nan,
38
+ np.nan,
39
+ np.nan,
40
+ np.nan,
41
+ np.nan,
42
+ np.nan,
43
+ np.nan,
44
+ ],
45
+ "c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
46
+ "h": [None] * 6 + cat_values,
47
+ }
48
+ )
49
+ exp["h"] = exp["h"].astype(df2["h"].dtype)
50
+ tm.assert_frame_equal(res, exp)
51
+
52
+ def test_categorical_concat_dtypes(self):
53
+ # GH8143
54
+ index = ["cat", "obj", "num"]
55
+ cat = Categorical(["a", "b", "c"])
56
+ obj = Series(["a", "b", "c"])
57
+ num = Series([1, 2, 3])
58
+ df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
59
+
60
+ result = df.dtypes == "object"
61
+ expected = Series([False, True, False], index=index)
62
+ tm.assert_series_equal(result, expected)
63
+
64
+ result = df.dtypes == "int64"
65
+ expected = Series([False, False, True], index=index)
66
+ tm.assert_series_equal(result, expected)
67
+
68
+ result = df.dtypes == "category"
69
+ expected = Series([True, False, False], index=index)
70
+ tm.assert_series_equal(result, expected)
71
+
72
+ def test_concat_categoricalindex(self):
73
+ # GH 16111, categories that aren't lexsorted
74
+ categories = [9, 0, 1, 2, 3]
75
+
76
+ a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
77
+ b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
78
+ c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
79
+
80
+ result = pd.concat([a, b, c], axis=1)
81
+
82
+ exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
83
+ exp = DataFrame(
84
+ {
85
+ 0: [1, 1, np.nan, np.nan],
86
+ 1: [np.nan, 2, 2, np.nan],
87
+ 2: [np.nan, np.nan, 3, 3],
88
+ },
89
+ columns=[0, 1, 2],
90
+ index=exp_idx,
91
+ )
92
+ tm.assert_frame_equal(result, exp)
93
+
94
+ def test_categorical_concat_preserve(self):
95
+ # GH 8641 series concat not preserving category dtype
96
+ # GH 13524 can concat different categories
97
+ s = Series(list("abc"), dtype="category")
98
+ s2 = Series(list("abd"), dtype="category")
99
+
100
+ exp = Series(list("abcabd"))
101
+ res = pd.concat([s, s2], ignore_index=True)
102
+ tm.assert_series_equal(res, exp)
103
+
104
+ exp = Series(list("abcabc"), dtype="category")
105
+ res = pd.concat([s, s], ignore_index=True)
106
+ tm.assert_series_equal(res, exp)
107
+
108
+ exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
109
+ res = pd.concat([s, s])
110
+ tm.assert_series_equal(res, exp)
111
+
112
+ a = Series(np.arange(6, dtype="int64"))
113
+ b = Series(list("aabbca"))
114
+
115
+ df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
116
+ res = pd.concat([df2, df2])
117
+ exp = DataFrame(
118
+ {
119
+ "A": pd.concat([a, a]),
120
+ "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
121
+ }
122
+ )
123
+ tm.assert_frame_equal(res, exp)
124
+
125
+ def test_categorical_index_preserver(self):
126
+ a = Series(np.arange(6, dtype="int64"))
127
+ b = Series(list("aabbca"))
128
+
129
+ df2 = DataFrame(
130
+ {"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
131
+ ).set_index("B")
132
+ result = pd.concat([df2, df2])
133
+ expected = DataFrame(
134
+ {
135
+ "A": pd.concat([a, a]),
136
+ "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
137
+ }
138
+ ).set_index("B")
139
+ tm.assert_frame_equal(result, expected)
140
+
141
+ # wrong categories -> uses concat_compat, which casts to object
142
+ df3 = DataFrame(
143
+ {"A": a, "B": Categorical(b, categories=list("abe"))}
144
+ ).set_index("B")
145
+ result = pd.concat([df2, df3])
146
+ expected = pd.concat(
147
+ [
148
+ df2.set_axis(df2.index.astype(object), axis=0),
149
+ df3.set_axis(df3.index.astype(object), axis=0),
150
+ ]
151
+ )
152
+ tm.assert_frame_equal(result, expected)
153
+
154
+ def test_concat_categorical_tz(self):
155
+ # GH-23816
156
+ a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
157
+ b = Series(["a", "b"], dtype="category")
158
+ result = pd.concat([a, b], ignore_index=True)
159
+ expected = Series(
160
+ [
161
+ pd.Timestamp("2017-01-01", tz="US/Pacific"),
162
+ pd.Timestamp("2017-01-02", tz="US/Pacific"),
163
+ "a",
164
+ "b",
165
+ ]
166
+ )
167
+ tm.assert_series_equal(result, expected)
168
+
169
+ def test_concat_categorical_unchanged(self):
170
+ # GH-12007
171
+ # test fix for when concat on categorical and float
172
+ # coerces dtype categorical -> float
173
+ df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
174
+ ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
175
+ result = pd.concat([df, ser], axis=1)
176
+ expected = DataFrame(
177
+ {
178
+ "A": Series(["a", "b", "c", np.nan], dtype="category"),
179
+ "B": Series([0, 1, np.nan, 2], dtype="float"),
180
+ }
181
+ )
182
+ tm.assert_equal(result, expected)
183
+
184
+ def test_categorical_concat_gh7864(self):
185
+ # GH 7864
186
+ # make sure ordering is preserved
187
+ df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
188
+ df["grade"] = Categorical(df["raw_grade"])
189
+ df["grade"].cat.set_categories(["e", "a", "b"])
190
+
191
+ df1 = df[0:3]
192
+ df2 = df[3:]
193
+
194
+ tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
195
+ tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
196
+
197
+ dfx = pd.concat([df1, df2])
198
+ tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
199
+
200
+ dfa = df1._append(df2)
201
+ tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
202
+
203
+ def test_categorical_index_upcast(self):
204
+ # GH 17629
205
+ # test upcasting to object when concatinating on categorical indexes
206
+ # with non-identical categories
207
+
208
+ a = DataFrame({"foo": [1, 2]}, index=Categorical(["foo", "bar"]))
209
+ b = DataFrame({"foo": [4, 3]}, index=Categorical(["baz", "bar"]))
210
+
211
+ res = pd.concat([a, b])
212
+ exp = DataFrame({"foo": [1, 2, 4, 3]}, index=["foo", "bar", "baz", "bar"])
213
+
214
+ tm.assert_equal(res, exp)
215
+
216
+ a = Series([1, 2], index=Categorical(["foo", "bar"]))
217
+ b = Series([4, 3], index=Categorical(["baz", "bar"]))
218
+
219
+ res = pd.concat([a, b])
220
+ exp = Series([1, 2, 4, 3], index=["foo", "bar", "baz", "bar"])
221
+
222
+ tm.assert_equal(res, exp)
223
+
224
+ def test_categorical_missing_from_one_frame(self):
225
+ # GH 25412
226
+ df1 = DataFrame({"f1": [1, 2, 3]})
227
+ df2 = DataFrame({"f1": [2, 3, 1], "f2": Series([4, 4, 4]).astype("category")})
228
+ result = pd.concat([df1, df2], sort=True)
229
+ dtype = CategoricalDtype([4])
230
+ expected = DataFrame(
231
+ {
232
+ "f1": [1, 2, 3, 2, 3, 1],
233
+ "f2": Categorical.from_codes([-1, -1, -1, 0, 0, 0], dtype=dtype),
234
+ },
235
+ index=[0, 1, 2, 0, 1, 2],
236
+ )
237
+ tm.assert_frame_equal(result, expected)
238
+
239
+ def test_concat_categorical_same_categories_different_order(self):
240
+ # https://github.com/pandas-dev/pandas/issues/24845
241
+
242
+ c1 = pd.CategoricalIndex(["a", "a"], categories=["a", "b"], ordered=False)
243
+ c2 = pd.CategoricalIndex(["b", "b"], categories=["b", "a"], ordered=False)
244
+ c3 = pd.CategoricalIndex(
245
+ ["a", "a", "b", "b"], categories=["a", "b"], ordered=False
246
+ )
247
+
248
+ df1 = DataFrame({"A": [1, 2]}, index=c1)
249
+ df2 = DataFrame({"A": [3, 4]}, index=c2)
250
+
251
+ result = pd.concat((df1, df2))
252
+ expected = DataFrame({"A": [1, 2, 3, 4]}, index=c3)
253
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py ADDED
@@ -0,0 +1,787 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import (
2
+ abc,
3
+ deque,
4
+ )
5
+ from datetime import datetime
6
+ from decimal import Decimal
7
+ from typing import Iterator
8
+ from warnings import (
9
+ catch_warnings,
10
+ simplefilter,
11
+ )
12
+
13
+ import numpy as np
14
+ import pytest
15
+
16
+ from pandas.errors import (
17
+ InvalidIndexError,
18
+ PerformanceWarning,
19
+ )
20
+ import pandas.util._test_decorators as td
21
+
22
+ import pandas as pd
23
+ from pandas import (
24
+ DataFrame,
25
+ Index,
26
+ MultiIndex,
27
+ PeriodIndex,
28
+ Series,
29
+ concat,
30
+ date_range,
31
+ )
32
+ import pandas._testing as tm
33
+ from pandas.core.arrays import SparseArray
34
+ from pandas.tests.extension.decimal import to_decimal
35
+
36
+
37
+ class TestConcatenate:
38
+ def test_append_concat(self):
39
+ # GH#1815
40
+ d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
41
+ d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
42
+
43
+ s1 = Series(np.random.randn(10), d1)
44
+ s2 = Series(np.random.randn(10), d2)
45
+
46
+ s1 = s1.to_period()
47
+ s2 = s2.to_period()
48
+
49
+ # drops index
50
+ result = concat([s1, s2])
51
+ assert isinstance(result.index, PeriodIndex)
52
+ assert result.index[0] == s1.index[0]
53
+
54
+ def test_concat_copy(self, using_array_manager, using_copy_on_write):
55
+ df = DataFrame(np.random.randn(4, 3))
56
+ df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
57
+ df3 = DataFrame({5: "foo"}, index=range(4))
58
+
59
+ # These are actual copies.
60
+ result = concat([df, df2, df3], axis=1, copy=True)
61
+
62
+ if not using_copy_on_write:
63
+ for arr in result._mgr.arrays:
64
+ assert arr.base is None
65
+ else:
66
+ for arr in result._mgr.arrays:
67
+ assert arr.base is not None
68
+
69
+ # These are the same.
70
+ result = concat([df, df2, df3], axis=1, copy=False)
71
+
72
+ for arr in result._mgr.arrays:
73
+ if arr.dtype.kind == "f":
74
+ assert arr.base is df._mgr.arrays[0].base
75
+ elif arr.dtype.kind in ["i", "u"]:
76
+ assert arr.base is df2._mgr.arrays[0].base
77
+ elif arr.dtype == object:
78
+ if using_array_manager:
79
+ # we get the same array object, which has no base
80
+ assert arr is df3._mgr.arrays[0]
81
+ else:
82
+ assert arr.base is not None
83
+
84
+ # Float block was consolidated.
85
+ df4 = DataFrame(np.random.randn(4, 1))
86
+ result = concat([df, df2, df3, df4], axis=1, copy=False)
87
+ for arr in result._mgr.arrays:
88
+ if arr.dtype.kind == "f":
89
+ if using_array_manager or using_copy_on_write:
90
+ # this is a view on some array in either df or df4
91
+ assert any(
92
+ np.shares_memory(arr, other)
93
+ for other in df._mgr.arrays + df4._mgr.arrays
94
+ )
95
+ else:
96
+ # the block was consolidated, so we got a copy anyway
97
+ assert arr.base is None
98
+ elif arr.dtype.kind in ["i", "u"]:
99
+ assert arr.base is df2._mgr.arrays[0].base
100
+ elif arr.dtype == object:
101
+ # this is a view on df3
102
+ assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
103
+
104
+ def test_concat_with_group_keys(self):
105
+ # axis=0
106
+ df = DataFrame(np.random.randn(3, 4))
107
+ df2 = DataFrame(np.random.randn(4, 4))
108
+
109
+ result = concat([df, df2], keys=[0, 1])
110
+ exp_index = MultiIndex.from_arrays(
111
+ [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
112
+ )
113
+ expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
114
+ tm.assert_frame_equal(result, expected)
115
+
116
+ result = concat([df, df], keys=[0, 1])
117
+ exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
118
+ expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
119
+ tm.assert_frame_equal(result, expected)
120
+
121
+ # axis=1
122
+ df = DataFrame(np.random.randn(4, 3))
123
+ df2 = DataFrame(np.random.randn(4, 4))
124
+
125
+ result = concat([df, df2], keys=[0, 1], axis=1)
126
+ expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
127
+ tm.assert_frame_equal(result, expected)
128
+
129
+ result = concat([df, df], keys=[0, 1], axis=1)
130
+ expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
131
+ tm.assert_frame_equal(result, expected)
132
+
133
+ def test_concat_keys_specific_levels(self):
134
+ df = DataFrame(np.random.randn(10, 4))
135
+ pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
136
+ level = ["three", "two", "one", "zero"]
137
+ result = concat(
138
+ pieces,
139
+ axis=1,
140
+ keys=["one", "two", "three"],
141
+ levels=[level],
142
+ names=["group_key"],
143
+ )
144
+
145
+ tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
146
+ tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
147
+
148
+ assert result.columns.names == ["group_key", None]
149
+
150
+ @pytest.mark.parametrize("mapping", ["mapping", "dict"])
151
+ def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
152
+ constructor = dict if mapping == "dict" else non_dict_mapping_subclass
153
+ frames = constructor(
154
+ {
155
+ "foo": DataFrame(np.random.randn(4, 3)),
156
+ "bar": DataFrame(np.random.randn(4, 3)),
157
+ "baz": DataFrame(np.random.randn(4, 3)),
158
+ "qux": DataFrame(np.random.randn(4, 3)),
159
+ }
160
+ )
161
+
162
+ sorted_keys = list(frames.keys())
163
+
164
+ result = concat(frames)
165
+ expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
166
+ tm.assert_frame_equal(result, expected)
167
+
168
+ result = concat(frames, axis=1)
169
+ expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
170
+ tm.assert_frame_equal(result, expected)
171
+
172
+ keys = ["baz", "foo", "bar"]
173
+ result = concat(frames, keys=keys)
174
+ expected = concat([frames[k] for k in keys], keys=keys)
175
+ tm.assert_frame_equal(result, expected)
176
+
177
+ def test_concat_keys_and_levels(self):
178
+ df = DataFrame(np.random.randn(1, 3))
179
+ df2 = DataFrame(np.random.randn(1, 4))
180
+
181
+ levels = [["foo", "baz"], ["one", "two"]]
182
+ names = ["first", "second"]
183
+ result = concat(
184
+ [df, df2, df, df2],
185
+ keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
186
+ levels=levels,
187
+ names=names,
188
+ )
189
+ expected = concat([df, df2, df, df2])
190
+ exp_index = MultiIndex(
191
+ levels=levels + [[0]],
192
+ codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
193
+ names=names + [None],
194
+ )
195
+ expected.index = exp_index
196
+
197
+ tm.assert_frame_equal(result, expected)
198
+
199
+ # no names
200
+ result = concat(
201
+ [df, df2, df, df2],
202
+ keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
203
+ levels=levels,
204
+ )
205
+ assert result.index.names == (None,) * 3
206
+
207
+ # no levels
208
+ result = concat(
209
+ [df, df2, df, df2],
210
+ keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
211
+ names=["first", "second"],
212
+ )
213
+ assert result.index.names == ("first", "second", None)
214
+ tm.assert_index_equal(
215
+ result.index.levels[0], Index(["baz", "foo"], name="first")
216
+ )
217
+
218
+ def test_concat_keys_levels_no_overlap(self):
219
+ # GH #1406
220
+ df = DataFrame(np.random.randn(1, 3), index=["a"])
221
+ df2 = DataFrame(np.random.randn(1, 4), index=["b"])
222
+
223
+ msg = "Values not found in passed level"
224
+ with pytest.raises(ValueError, match=msg):
225
+ concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
226
+
227
+ msg = "Key one not in level"
228
+ with pytest.raises(ValueError, match=msg):
229
+ concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
230
+
231
+ def test_crossed_dtypes_weird_corner(self):
232
+ columns = ["A", "B", "C", "D"]
233
+ df1 = DataFrame(
234
+ {
235
+ "A": np.array([1, 2, 3, 4], dtype="f8"),
236
+ "B": np.array([1, 2, 3, 4], dtype="i8"),
237
+ "C": np.array([1, 2, 3, 4], dtype="f8"),
238
+ "D": np.array([1, 2, 3, 4], dtype="i8"),
239
+ },
240
+ columns=columns,
241
+ )
242
+
243
+ df2 = DataFrame(
244
+ {
245
+ "A": np.array([1, 2, 3, 4], dtype="i8"),
246
+ "B": np.array([1, 2, 3, 4], dtype="f8"),
247
+ "C": np.array([1, 2, 3, 4], dtype="i8"),
248
+ "D": np.array([1, 2, 3, 4], dtype="f8"),
249
+ },
250
+ columns=columns,
251
+ )
252
+
253
+ appended = concat([df1, df2], ignore_index=True)
254
+ expected = DataFrame(
255
+ np.concatenate([df1.values, df2.values], axis=0), columns=columns
256
+ )
257
+ tm.assert_frame_equal(appended, expected)
258
+
259
+ df = DataFrame(np.random.randn(1, 3), index=["a"])
260
+ df2 = DataFrame(np.random.randn(1, 4), index=["b"])
261
+ result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
262
+ assert result.index.names == ("first", "second")
263
+
264
+ def test_with_mixed_tuples(self, sort):
265
+ # 10697
266
+ # columns have mixed tuples, so handle properly
267
+ df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
268
+ df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
269
+
270
+ # it works
271
+ concat([df1, df2], sort=sort)
272
+
273
+ def test_concat_mixed_objs(self):
274
+ # concat mixed series/frames
275
+ # G2385
276
+
277
+ # axis 1
278
+ index = date_range("01-Jan-2013", periods=10, freq="H")
279
+ arr = np.arange(10, dtype="int64")
280
+ s1 = Series(arr, index=index)
281
+ s2 = Series(arr, index=index)
282
+ df = DataFrame(arr.reshape(-1, 1), index=index)
283
+
284
+ expected = DataFrame(
285
+ np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
286
+ )
287
+ result = concat([df, df], axis=1)
288
+ tm.assert_frame_equal(result, expected)
289
+
290
+ expected = DataFrame(
291
+ np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
292
+ )
293
+ result = concat([s1, s2], axis=1)
294
+ tm.assert_frame_equal(result, expected)
295
+
296
+ expected = DataFrame(
297
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
298
+ )
299
+ result = concat([s1, s2, s1], axis=1)
300
+ tm.assert_frame_equal(result, expected)
301
+
302
+ expected = DataFrame(
303
+ np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
304
+ )
305
+ result = concat([s1, df, s2, s2, s1], axis=1)
306
+ tm.assert_frame_equal(result, expected)
307
+
308
+ # with names
309
+ s1.name = "foo"
310
+ expected = DataFrame(
311
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
312
+ )
313
+ result = concat([s1, df, s2], axis=1)
314
+ tm.assert_frame_equal(result, expected)
315
+
316
+ s2.name = "bar"
317
+ expected = DataFrame(
318
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
319
+ )
320
+ result = concat([s1, df, s2], axis=1)
321
+ tm.assert_frame_equal(result, expected)
322
+
323
+ # ignore index
324
+ expected = DataFrame(
325
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
326
+ )
327
+ result = concat([s1, df, s2], axis=1, ignore_index=True)
328
+ tm.assert_frame_equal(result, expected)
329
+
330
+ # axis 0
331
+ expected = DataFrame(
332
+ np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
333
+ )
334
+ result = concat([s1, df, s2])
335
+ tm.assert_frame_equal(result, expected)
336
+
337
+ expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
338
+ result = concat([s1, df, s2], ignore_index=True)
339
+ tm.assert_frame_equal(result, expected)
340
+
341
+ def test_dtype_coerceion(self):
342
+ # 12411
343
+ df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
344
+
345
+ result = concat([df.iloc[[0]], df.iloc[[1]]])
346
+ tm.assert_series_equal(result.dtypes, df.dtypes)
347
+
348
+ # 12045
349
+ df = DataFrame({"date": [datetime(2012, 1, 1), datetime(1012, 1, 2)]})
350
+ result = concat([df.iloc[[0]], df.iloc[[1]]])
351
+ tm.assert_series_equal(result.dtypes, df.dtypes)
352
+
353
+ # 11594
354
+ df = DataFrame({"text": ["some words"] + [None] * 9})
355
+ result = concat([df.iloc[[0]], df.iloc[[1]]])
356
+ tm.assert_series_equal(result.dtypes, df.dtypes)
357
+
358
+ def test_concat_single_with_key(self):
359
+ df = DataFrame(np.random.randn(10, 4))
360
+
361
+ result = concat([df], keys=["foo"])
362
+ expected = concat([df, df], keys=["foo", "bar"])
363
+ tm.assert_frame_equal(result, expected[:10])
364
+
365
+ def test_concat_no_items_raises(self):
366
+ with pytest.raises(ValueError, match="No objects to concatenate"):
367
+ concat([])
368
+
369
+ def test_concat_exclude_none(self):
370
+ df = DataFrame(np.random.randn(10, 4))
371
+
372
+ pieces = [df[:5], None, None, df[5:]]
373
+ result = concat(pieces)
374
+ tm.assert_frame_equal(result, df)
375
+ with pytest.raises(ValueError, match="All objects passed were None"):
376
+ concat([None, None])
377
+
378
+ def test_concat_keys_with_none(self):
379
+ # #1649
380
+ df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
381
+
382
+ result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
383
+ expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
384
+ tm.assert_frame_equal(result, expected)
385
+
386
+ result = concat(
387
+ [None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
388
+ )
389
+ expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
390
+ tm.assert_frame_equal(result, expected)
391
+
392
+ def test_concat_bug_1719(self):
393
+ ts1 = tm.makeTimeSeries()
394
+ ts2 = tm.makeTimeSeries()[::2]
395
+
396
+ # to join with union
397
+ # these two are of different length!
398
+ left = concat([ts1, ts2], join="outer", axis=1)
399
+ right = concat([ts2, ts1], join="outer", axis=1)
400
+
401
+ assert len(left) == len(right)
402
+
403
+ def test_concat_bug_2972(self):
404
+ ts0 = Series(np.zeros(5))
405
+ ts1 = Series(np.ones(5))
406
+ ts0.name = ts1.name = "same name"
407
+ result = concat([ts0, ts1], axis=1)
408
+
409
+ expected = DataFrame({0: ts0, 1: ts1})
410
+ expected.columns = ["same name", "same name"]
411
+ tm.assert_frame_equal(result, expected)
412
+
413
+ def test_concat_bug_3602(self):
414
+ # GH 3602, duplicate columns
415
+ df1 = DataFrame(
416
+ {
417
+ "firmNo": [0, 0, 0, 0],
418
+ "prc": [6, 6, 6, 6],
419
+ "stringvar": ["rrr", "rrr", "rrr", "rrr"],
420
+ }
421
+ )
422
+ df2 = DataFrame(
423
+ {"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
424
+ )
425
+ expected = DataFrame(
426
+ [
427
+ [0, 6, "rrr", 9, 1, 6],
428
+ [0, 6, "rrr", 10, 2, 6],
429
+ [0, 6, "rrr", 11, 3, 6],
430
+ [0, 6, "rrr", 12, 4, 6],
431
+ ]
432
+ )
433
+ expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
434
+
435
+ result = concat([df1, df2], axis=1)
436
+ tm.assert_frame_equal(result, expected)
437
+
438
+ def test_concat_iterables(self):
439
+ # GH8645 check concat works with tuples, list, generators, and weird
440
+ # stuff like deque and custom iterables
441
+ df1 = DataFrame([1, 2, 3])
442
+ df2 = DataFrame([4, 5, 6])
443
+ expected = DataFrame([1, 2, 3, 4, 5, 6])
444
+ tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
445
+ tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
446
+ tm.assert_frame_equal(
447
+ concat((df for df in (df1, df2)), ignore_index=True), expected
448
+ )
449
+ tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
450
+
451
+ class CustomIterator1:
452
+ def __len__(self) -> int:
453
+ return 2
454
+
455
+ def __getitem__(self, index):
456
+ try:
457
+ return {0: df1, 1: df2}[index]
458
+ except KeyError as err:
459
+ raise IndexError from err
460
+
461
+ tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected)
462
+
463
+ class CustomIterator2(abc.Iterable):
464
+ def __iter__(self) -> Iterator:
465
+ yield df1
466
+ yield df2
467
+
468
+ tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected)
469
+
470
+ def test_concat_order(self):
471
+ # GH 17344, GH#47331
472
+ dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
473
+ dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for _ in range(100)]
474
+
475
+ result = concat(dfs, sort=True).columns
476
+ expected = Index([1, "a", None])
477
+ tm.assert_index_equal(result, expected)
478
+
479
+ def test_concat_different_extension_dtypes_upcasts(self):
480
+ a = Series(pd.array([1, 2], dtype="Int64"))
481
+ b = Series(to_decimal([1, 2]))
482
+
483
+ result = concat([a, b], ignore_index=True)
484
+ expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
485
+ tm.assert_series_equal(result, expected)
486
+
487
+ def test_concat_ordered_dict(self):
488
+ # GH 21510
489
+ expected = concat(
490
+ [Series(range(3)), Series(range(4))], keys=["First", "Another"]
491
+ )
492
+ result = concat({"First": Series(range(3)), "Another": Series(range(4))})
493
+ tm.assert_series_equal(result, expected)
494
+
495
+ def test_concat_duplicate_indices_raise(self):
496
+ # GH 45888: test raise for concat DataFrames with duplicate indices
497
+ # https://github.com/pandas-dev/pandas/issues/36263
498
+ df1 = DataFrame(np.random.randn(5), index=[0, 1, 2, 3, 3], columns=["a"])
499
+ df2 = DataFrame(np.random.randn(5), index=[0, 1, 2, 2, 4], columns=["b"])
500
+ msg = "Reindexing only valid with uniquely valued Index objects"
501
+ with pytest.raises(InvalidIndexError, match=msg):
502
+ concat([df1, df2], axis=1)
503
+
504
+
505
+ @pytest.mark.parametrize("dt", np.sctypes["float"])
506
+ def test_concat_no_unnecessary_upcast(dt, frame_or_series):
507
+ # GH 13247
508
+ dims = frame_or_series(dtype=object).ndim
509
+
510
+ dfs = [
511
+ frame_or_series(np.array([1], dtype=dt, ndmin=dims)),
512
+ frame_or_series(np.array([np.nan], dtype=dt, ndmin=dims)),
513
+ frame_or_series(np.array([5], dtype=dt, ndmin=dims)),
514
+ ]
515
+ x = concat(dfs)
516
+ assert x.values.dtype == dt
517
+
518
+
519
+ @pytest.mark.parametrize("pdt", [Series, DataFrame])
520
+ @pytest.mark.parametrize("dt", np.sctypes["int"])
521
+ def test_concat_will_upcast(dt, pdt):
522
+ with catch_warnings(record=True):
523
+ dims = pdt().ndim
524
+ dfs = [
525
+ pdt(np.array([1], dtype=dt, ndmin=dims)),
526
+ pdt(np.array([np.nan], ndmin=dims)),
527
+ pdt(np.array([5], dtype=dt, ndmin=dims)),
528
+ ]
529
+ x = concat(dfs)
530
+ assert x.values.dtype == "float64"
531
+
532
+
533
+ def test_concat_empty_and_non_empty_frame_regression():
534
+ # GH 18178 regression test
535
+ df1 = DataFrame({"foo": [1]})
536
+ df2 = DataFrame({"foo": []})
537
+ expected = DataFrame({"foo": [1.0]})
538
+ result = concat([df1, df2])
539
+ tm.assert_frame_equal(result, expected)
540
+
541
+
542
+ def test_concat_sparse():
543
+ # GH 23557
544
+ a = Series(SparseArray([0, 1, 2]))
545
+ expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
546
+ pd.SparseDtype(np.int64, 0)
547
+ )
548
+ result = concat([a, a], axis=1)
549
+ tm.assert_frame_equal(result, expected)
550
+
551
+
552
+ def test_concat_dense_sparse():
553
+ # GH 30668
554
+ dtype = pd.SparseDtype(np.float64, None)
555
+ a = Series(pd.arrays.SparseArray([1, None]), dtype=dtype)
556
+ b = Series([1], dtype=float)
557
+ expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(dtype)
558
+ result = concat([a, b], axis=0)
559
+ tm.assert_series_equal(result, expected)
560
+
561
+
562
+ @pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
563
+ def test_duplicate_keys(keys):
564
+ # GH 33654
565
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
566
+ s1 = Series([7, 8, 9], name="c")
567
+ s2 = Series([10, 11, 12], name="d")
568
+ result = concat([df, s1, s2], axis=1, keys=keys)
569
+ expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
570
+ expected_columns = MultiIndex.from_tuples(
571
+ [(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
572
+ )
573
+ expected = DataFrame(expected_values, columns=expected_columns)
574
+ tm.assert_frame_equal(result, expected)
575
+
576
+
577
+ def test_duplicate_keys_same_frame():
578
+ # GH 43595
579
+ keys = ["e", "e"]
580
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
581
+ result = concat([df, df], axis=1, keys=keys)
582
+ expected_values = [[1, 4, 1, 4], [2, 5, 2, 5], [3, 6, 3, 6]]
583
+ expected_columns = MultiIndex.from_tuples(
584
+ [(keys[0], "a"), (keys[0], "b"), (keys[1], "a"), (keys[1], "b")]
585
+ )
586
+ expected = DataFrame(expected_values, columns=expected_columns)
587
+ with catch_warnings():
588
+ # result.columns not sorted, resulting in performance warning
589
+ simplefilter("ignore", PerformanceWarning)
590
+ tm.assert_frame_equal(result, expected)
591
+
592
+
593
+ @pytest.mark.parametrize(
594
+ "obj",
595
+ [
596
+ tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
597
+ tm.SubclassedSeries(np.arange(0, 10), name="A"),
598
+ ],
599
+ )
600
+ def test_concat_preserves_subclass(obj):
601
+ # GH28330 -- preserve subclass
602
+
603
+ result = concat([obj, obj])
604
+ assert isinstance(result, type(obj))
605
+
606
+
607
+ def test_concat_frame_axis0_extension_dtypes():
608
+ # preserve extension dtype (through common_dtype mechanism)
609
+ df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")})
610
+ df2 = DataFrame({"a": np.array([4, 5, 6])})
611
+
612
+ result = concat([df1, df2], ignore_index=True)
613
+ expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64")
614
+ tm.assert_frame_equal(result, expected)
615
+
616
+ result = concat([df2, df1], ignore_index=True)
617
+ expected = DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64")
618
+ tm.assert_frame_equal(result, expected)
619
+
620
+
621
+ def test_concat_preserves_extension_int64_dtype():
622
+ # GH 24768
623
+ df_a = DataFrame({"a": [-1]}, dtype="Int64")
624
+ df_b = DataFrame({"b": [1]}, dtype="Int64")
625
+ result = concat([df_a, df_b], ignore_index=True)
626
+ expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
627
+ tm.assert_frame_equal(result, expected)
628
+
629
+
630
+ @pytest.mark.parametrize(
631
+ "dtype1,dtype2,expected_dtype",
632
+ [
633
+ ("bool", "bool", "bool"),
634
+ ("boolean", "bool", "boolean"),
635
+ ("bool", "boolean", "boolean"),
636
+ ("boolean", "boolean", "boolean"),
637
+ ],
638
+ )
639
+ def test_concat_bool_types(dtype1, dtype2, expected_dtype):
640
+ # GH 42800
641
+ ser1 = Series([True, False], dtype=dtype1)
642
+ ser2 = Series([False, True], dtype=dtype2)
643
+ result = concat([ser1, ser2], ignore_index=True)
644
+ expected = Series([True, False, False, True], dtype=expected_dtype)
645
+ tm.assert_series_equal(result, expected)
646
+
647
+
648
+ @pytest.mark.parametrize(
649
+ ("keys", "integrity"),
650
+ [
651
+ (["red"] * 3, True),
652
+ (["red"] * 3, False),
653
+ (["red", "blue", "red"], False),
654
+ (["red", "blue", "red"], True),
655
+ ],
656
+ )
657
+ def test_concat_repeated_keys(keys, integrity):
658
+ # GH: 20816
659
+ series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})]
660
+ result = concat(series_list, keys=keys, verify_integrity=integrity)
661
+ tuples = list(zip(keys, ["a", "b", "c"]))
662
+ expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
663
+ tm.assert_series_equal(result, expected)
664
+
665
+
666
+ def test_concat_null_object_with_dti():
667
+ # GH#40841
668
+ dti = pd.DatetimeIndex(
669
+ ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
670
+ )
671
+ right = DataFrame(data={"C": [0.5274]}, index=dti)
672
+
673
+ idx = Index([None], dtype="object", name="Maybe Time (UTC)")
674
+ left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx)
675
+
676
+ result = concat([left, right], axis="columns")
677
+
678
+ exp_index = Index([None, dti[0]], dtype=object)
679
+ expected = DataFrame(
680
+ {"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]},
681
+ index=exp_index,
682
+ )
683
+ tm.assert_frame_equal(result, expected)
684
+
685
+
686
+ def test_concat_multiindex_with_empty_rangeindex():
687
+ # GH#41234
688
+ mi = MultiIndex.from_tuples([("B", 1), ("C", 1)])
689
+ df1 = DataFrame([[1, 2]], columns=mi)
690
+ df2 = DataFrame(index=[1], columns=pd.RangeIndex(0))
691
+
692
+ result = concat([df1, df2])
693
+ expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi)
694
+ tm.assert_frame_equal(result, expected)
695
+
696
+
697
+ @pytest.mark.parametrize(
698
+ "data",
699
+ [
700
+ Series(data=[1, 2]),
701
+ DataFrame(
702
+ data={
703
+ "col1": [1, 2],
704
+ }
705
+ ),
706
+ DataFrame(dtype=float),
707
+ Series(dtype=float),
708
+ ],
709
+ )
710
+ def test_concat_drop_attrs(data):
711
+ # GH#41828
712
+ df1 = data.copy()
713
+ df1.attrs = {1: 1}
714
+ df2 = data.copy()
715
+ df2.attrs = {1: 2}
716
+ df = concat([df1, df2])
717
+ assert len(df.attrs) == 0
718
+
719
+
720
+ @pytest.mark.parametrize(
721
+ "data",
722
+ [
723
+ Series(data=[1, 2]),
724
+ DataFrame(
725
+ data={
726
+ "col1": [1, 2],
727
+ }
728
+ ),
729
+ DataFrame(dtype=float),
730
+ Series(dtype=float),
731
+ ],
732
+ )
733
+ def test_concat_retain_attrs(data):
734
+ # GH#41828
735
+ df1 = data.copy()
736
+ df1.attrs = {1: 1}
737
+ df2 = data.copy()
738
+ df2.attrs = {1: 1}
739
+ df = concat([df1, df2])
740
+ assert df.attrs[1] == 1
741
+
742
+
743
+ @td.skip_array_manager_invalid_test
744
+ @pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
745
+ @pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
746
+ def test_concat_ignore_empty_object_float(empty_dtype, df_dtype):
747
+ # https://github.com/pandas-dev/pandas/issues/45637
748
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
749
+ empty = DataFrame(columns=["foo", "bar"], dtype=empty_dtype)
750
+ result = concat([empty, df])
751
+ expected = df
752
+ if df_dtype == "int64":
753
+ # TODO what exact behaviour do we want for integer eventually?
754
+ if empty_dtype == "float64":
755
+ expected = df.astype("float64")
756
+ else:
757
+ expected = df.astype("object")
758
+ tm.assert_frame_equal(result, expected)
759
+
760
+
761
+ @td.skip_array_manager_invalid_test
762
+ @pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
763
+ @pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
764
+ def test_concat_ignore_all_na_object_float(empty_dtype, df_dtype):
765
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
766
+ empty = DataFrame({"foo": [np.nan], "bar": [np.nan]}, dtype=empty_dtype)
767
+ result = concat([empty, df], ignore_index=True)
768
+
769
+ if df_dtype == "int64":
770
+ # TODO what exact behaviour do we want for integer eventually?
771
+ if empty_dtype == "object":
772
+ df_dtype = "object"
773
+ else:
774
+ df_dtype = "float64"
775
+ expected = DataFrame({"foo": [None, 1, 2], "bar": [None, 1, 2]}, dtype=df_dtype)
776
+ tm.assert_frame_equal(result, expected)
777
+
778
+
779
+ @td.skip_array_manager_invalid_test
780
+ def test_concat_ignore_empty_from_reindex():
781
+ # https://github.com/pandas-dev/pandas/pull/43507#issuecomment-920375856
782
+ df1 = DataFrame({"a": [1], "b": [pd.Timestamp("2012-01-01")]})
783
+ df2 = DataFrame({"a": [2]})
784
+
785
+ result = concat([df1, df2.reindex(columns=df1.columns)], ignore_index=True)
786
+ expected = df1 = DataFrame({"a": [1, 2], "b": [pd.Timestamp("2012-01-01"), pd.NaT]})
787
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ Index,
8
+ Series,
9
+ concat,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
+ class TestDataFrameConcat:
15
+ def test_concat_multiple_frames_dtypes(self):
16
+ # GH#2759
17
+ df1 = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
18
+ df2 = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
19
+ results = concat((df1, df2), axis=1).dtypes
20
+ expected = Series(
21
+ [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
22
+ index=["foo", "bar", 0, 1],
23
+ )
24
+ tm.assert_series_equal(results, expected)
25
+
26
+ def test_concat_tuple_keys(self):
27
+ # GH#14438
28
+ df1 = DataFrame(np.ones((2, 2)), columns=list("AB"))
29
+ df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
30
+ results = concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
31
+ expected = DataFrame(
32
+ {
33
+ "A": {
34
+ ("bee", "bah", 0): 1.0,
35
+ ("bee", "bah", 1): 1.0,
36
+ ("bee", "boo", 0): 2.0,
37
+ ("bee", "boo", 1): 2.0,
38
+ ("bee", "boo", 2): 2.0,
39
+ },
40
+ "B": {
41
+ ("bee", "bah", 0): 1.0,
42
+ ("bee", "bah", 1): 1.0,
43
+ ("bee", "boo", 0): 2.0,
44
+ ("bee", "boo", 1): 2.0,
45
+ ("bee", "boo", 2): 2.0,
46
+ },
47
+ }
48
+ )
49
+ tm.assert_frame_equal(results, expected)
50
+
51
+ def test_concat_named_keys(self):
52
+ # GH#14252
53
+ df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
54
+ index = Index(["a", "b"], name="baz")
55
+ concatted_named_from_keys = concat([df, df], keys=index)
56
+ expected_named = DataFrame(
57
+ {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
58
+ index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
59
+ )
60
+ tm.assert_frame_equal(concatted_named_from_keys, expected_named)
61
+
62
+ index_no_name = Index(["a", "b"], name=None)
63
+ concatted_named_from_names = concat([df, df], keys=index_no_name, names=["baz"])
64
+ tm.assert_frame_equal(concatted_named_from_names, expected_named)
65
+
66
+ concatted_unnamed = concat([df, df], keys=index_no_name)
67
+ expected_unnamed = DataFrame(
68
+ {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
69
+ index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
70
+ )
71
+ tm.assert_frame_equal(concatted_unnamed, expected_unnamed)
72
+
73
+ def test_concat_axis_parameter(self):
74
+ # GH#14369
75
+ df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2))
76
+ df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2))
77
+
78
+ # Index/row/0 DataFrame
79
+ expected_index = DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
80
+
81
+ concatted_index = concat([df1, df2], axis="index")
82
+ tm.assert_frame_equal(concatted_index, expected_index)
83
+
84
+ concatted_row = concat([df1, df2], axis="rows")
85
+ tm.assert_frame_equal(concatted_row, expected_index)
86
+
87
+ concatted_0 = concat([df1, df2], axis=0)
88
+ tm.assert_frame_equal(concatted_0, expected_index)
89
+
90
+ # Columns/1 DataFrame
91
+ expected_columns = DataFrame(
92
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
93
+ )
94
+
95
+ concatted_columns = concat([df1, df2], axis="columns")
96
+ tm.assert_frame_equal(concatted_columns, expected_columns)
97
+
98
+ concatted_1 = concat([df1, df2], axis=1)
99
+ tm.assert_frame_equal(concatted_1, expected_columns)
100
+
101
+ series1 = Series([0.1, 0.2])
102
+ series2 = Series([0.3, 0.4])
103
+
104
+ # Index/row/0 Series
105
+ expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
106
+
107
+ concatted_index_series = concat([series1, series2], axis="index")
108
+ tm.assert_series_equal(concatted_index_series, expected_index_series)
109
+
110
+ concatted_row_series = concat([series1, series2], axis="rows")
111
+ tm.assert_series_equal(concatted_row_series, expected_index_series)
112
+
113
+ concatted_0_series = concat([series1, series2], axis=0)
114
+ tm.assert_series_equal(concatted_0_series, expected_index_series)
115
+
116
+ # Columns/1 Series
117
+ expected_columns_series = DataFrame(
118
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
119
+ )
120
+
121
+ concatted_columns_series = concat([series1, series2], axis="columns")
122
+ tm.assert_frame_equal(concatted_columns_series, expected_columns_series)
123
+
124
+ concatted_1_series = concat([series1, series2], axis=1)
125
+ tm.assert_frame_equal(concatted_1_series, expected_columns_series)
126
+
127
+ # Testing ValueError
128
+ with pytest.raises(ValueError, match="No axis named"):
129
+ concat([series1, series2], axis="something")
130
+
131
+ def test_concat_numerical_names(self):
132
+ # GH#15262, GH#12223
133
+ df = DataFrame(
134
+ {"col": range(9)},
135
+ dtype="int32",
136
+ index=(
137
+ pd.MultiIndex.from_product(
138
+ [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
139
+ )
140
+ ),
141
+ )
142
+ result = concat((df.iloc[:2, :], df.iloc[-2:, :]))
143
+ expected = DataFrame(
144
+ {"col": [0, 1, 7, 8]},
145
+ dtype="int32",
146
+ index=pd.MultiIndex.from_tuples(
147
+ [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
148
+ ),
149
+ )
150
+ tm.assert_frame_equal(result, expected)
151
+
152
+ def test_concat_astype_dup_col(self):
153
+ # GH#23049
154
+ df = DataFrame([{"a": "b"}])
155
+ df = concat([df, df], axis=1)
156
+
157
+ result = df.astype("category")
158
+ expected = DataFrame(
159
+ np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
160
+ ).astype("category")
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+ def test_concat_dataframe_keys_bug(self, sort):
164
+ t1 = DataFrame(
165
+ {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
166
+ )
167
+ t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
168
+
169
+ # it works
170
+ result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
171
+ assert list(result.columns) == [("t1", "value"), ("t2", "value")]
172
+
173
+ def test_concat_bool_with_int(self):
174
+ # GH#42092 we may want to change this to return object, but that
175
+ # would need a deprecation
176
+ df1 = DataFrame(Series([True, False, True, True], dtype="bool"))
177
+ df2 = DataFrame(Series([1, 0, 1], dtype="int64"))
178
+
179
+ result = concat([df1, df2])
180
+ expected = concat([df1.astype("int64"), df2])
181
+ tm.assert_frame_equal(result, expected)
182
+
183
+ def test_concat_duplicates_in_index_with_keys(self):
184
+ # GH#42651
185
+ index = [1, 1, 3]
186
+ data = [1, 2, 3]
187
+
188
+ df = DataFrame(data=data, index=index)
189
+ result = concat([df], keys=["A"], names=["ID", "date"])
190
+ mi = pd.MultiIndex.from_product([["A"], index], names=["ID", "date"])
191
+ expected = DataFrame(data=data, index=mi)
192
+ tm.assert_frame_equal(result, expected)
193
+ tm.assert_index_equal(result.index.levels[1], Index([1, 3], name="date"))
194
+
195
+ @pytest.mark.parametrize("ignore_index", [True, False])
196
+ @pytest.mark.parametrize("order", ["C", "F"])
197
+ @pytest.mark.parametrize("axis", [0, 1])
198
+ def test_concat_copies(self, axis, order, ignore_index, using_copy_on_write):
199
+ # based on asv ConcatDataFrames
200
+ df = DataFrame(np.zeros((10000, 200), dtype=np.float32, order=order))
201
+
202
+ res = concat([df] * 5, axis=axis, ignore_index=ignore_index, copy=True)
203
+
204
+ if not using_copy_on_write:
205
+ for arr in res._iter_column_arrays():
206
+ for arr2 in df._iter_column_arrays():
207
+ assert not np.shares_memory(arr, arr2)
208
+
209
+ def test_outer_sort_columns(self):
210
+ # GH#47127
211
+ df1 = DataFrame({"A": [0], "B": [1], 0: 1})
212
+ df2 = DataFrame({"A": [100]})
213
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
214
+ expected = DataFrame({0: [1.0, np.nan], "A": [0, 100], "B": [1.0, np.nan]})
215
+ tm.assert_frame_equal(result, expected)
216
+
217
+ def test_inner_sort_columns(self):
218
+ # GH#47127
219
+ df1 = DataFrame({"A": [0], "B": [1], 0: 1})
220
+ df2 = DataFrame({"A": [100], 0: 2})
221
+ result = concat([df1, df2], ignore_index=True, join="inner", sort=True)
222
+ expected = DataFrame({0: [1, 2], "A": [0, 100]})
223
+ tm.assert_frame_equal(result, expected)
224
+
225
+ def test_sort_columns_one_df(self):
226
+ # GH#47127
227
+ df1 = DataFrame({"A": [100], 0: 2})
228
+ result = concat([df1], ignore_index=True, join="inner", sort=True)
229
+ expected = DataFrame({0: [2], "A": [100]})
230
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py ADDED
@@ -0,0 +1,540 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime as dt
2
+ from datetime import datetime
3
+
4
+ import dateutil
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ DatetimeIndex,
12
+ Index,
13
+ MultiIndex,
14
+ Series,
15
+ Timestamp,
16
+ concat,
17
+ date_range,
18
+ to_timedelta,
19
+ )
20
+ import pandas._testing as tm
21
+
22
+
23
+ class TestDatetimeConcat:
24
+ def test_concat_datetime64_block(self):
25
+ rng = date_range("1/1/2000", periods=10)
26
+
27
+ df = DataFrame({"time": rng})
28
+
29
+ result = concat([df, df])
30
+ assert (result.iloc[:10]["time"] == rng).all()
31
+ assert (result.iloc[10:]["time"] == rng).all()
32
+
33
+ def test_concat_datetime_datetime64_frame(self):
34
+ # GH#2624
35
+ rows = []
36
+ rows.append([datetime(2010, 1, 1), 1])
37
+ rows.append([datetime(2010, 1, 2), "hi"])
38
+
39
+ df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
40
+
41
+ ind = date_range(start="2000/1/1", freq="D", periods=10)
42
+ df1 = DataFrame({"date": ind, "test": range(10)})
43
+
44
+ # it works!
45
+ concat([df1, df2_obj])
46
+
47
+ def test_concat_datetime_timezone(self):
48
+ # GH 18523
49
+ idx1 = date_range("2011-01-01", periods=3, freq="H", tz="Europe/Paris")
50
+ idx2 = date_range(start=idx1[0], end=idx1[-1], freq="H")
51
+ df1 = DataFrame({"a": [1, 2, 3]}, index=idx1)
52
+ df2 = DataFrame({"b": [1, 2, 3]}, index=idx2)
53
+ result = concat([df1, df2], axis=1)
54
+
55
+ exp_idx = (
56
+ DatetimeIndex(
57
+ [
58
+ "2011-01-01 00:00:00+01:00",
59
+ "2011-01-01 01:00:00+01:00",
60
+ "2011-01-01 02:00:00+01:00",
61
+ ],
62
+ freq="H",
63
+ )
64
+ .tz_convert("UTC")
65
+ .tz_convert("Europe/Paris")
66
+ )
67
+
68
+ expected = DataFrame(
69
+ [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
70
+ )
71
+
72
+ tm.assert_frame_equal(result, expected)
73
+
74
+ idx3 = date_range("2011-01-01", periods=3, freq="H", tz="Asia/Tokyo")
75
+ df3 = DataFrame({"b": [1, 2, 3]}, index=idx3)
76
+ result = concat([df1, df3], axis=1)
77
+
78
+ exp_idx = DatetimeIndex(
79
+ [
80
+ "2010-12-31 15:00:00+00:00",
81
+ "2010-12-31 16:00:00+00:00",
82
+ "2010-12-31 17:00:00+00:00",
83
+ "2010-12-31 23:00:00+00:00",
84
+ "2011-01-01 00:00:00+00:00",
85
+ "2011-01-01 01:00:00+00:00",
86
+ ]
87
+ )
88
+
89
+ expected = DataFrame(
90
+ [
91
+ [np.nan, 1],
92
+ [np.nan, 2],
93
+ [np.nan, 3],
94
+ [1, np.nan],
95
+ [2, np.nan],
96
+ [3, np.nan],
97
+ ],
98
+ index=exp_idx,
99
+ columns=["a", "b"],
100
+ )
101
+
102
+ tm.assert_frame_equal(result, expected)
103
+
104
+ # GH 13783: Concat after resample
105
+ result = concat([df1.resample("H").mean(), df2.resample("H").mean()], sort=True)
106
+ expected = DataFrame(
107
+ {"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]},
108
+ index=idx1.append(idx1),
109
+ )
110
+ tm.assert_frame_equal(result, expected)
111
+
112
+ def test_concat_datetimeindex_freq(self):
113
+ # GH 3232
114
+ # Monotonic index result
115
+ dr = date_range("01-Jan-2013", periods=100, freq="50L", tz="UTC")
116
+ data = list(range(100))
117
+ expected = DataFrame(data, index=dr)
118
+ result = concat([expected[:50], expected[50:]])
119
+ tm.assert_frame_equal(result, expected)
120
+
121
+ # Non-monotonic index result
122
+ result = concat([expected[50:], expected[:50]])
123
+ expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))
124
+ expected.index._data.freq = None
125
+ tm.assert_frame_equal(result, expected)
126
+
127
+ def test_concat_multiindex_datetime_object_index(self):
128
+ # https://github.com/pandas-dev/pandas/issues/11058
129
+ idx = Index(
130
+ [dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)],
131
+ dtype="object",
132
+ )
133
+
134
+ s = Series(
135
+ ["a", "b"],
136
+ index=MultiIndex.from_arrays(
137
+ [
138
+ [1, 2],
139
+ idx[:-1],
140
+ ],
141
+ names=["first", "second"],
142
+ ),
143
+ )
144
+ s2 = Series(
145
+ ["a", "b"],
146
+ index=MultiIndex.from_arrays(
147
+ [[1, 2], idx[::2]],
148
+ names=["first", "second"],
149
+ ),
150
+ )
151
+ mi = MultiIndex.from_arrays(
152
+ [[1, 2, 2], idx],
153
+ names=["first", "second"],
154
+ )
155
+ assert mi.levels[1].dtype == object
156
+
157
+ expected = DataFrame(
158
+ [["a", "a"], ["b", np.nan], [np.nan, "b"]],
159
+ index=mi,
160
+ )
161
+ result = concat([s, s2], axis=1)
162
+ tm.assert_frame_equal(result, expected)
163
+
164
+ def test_concat_NaT_series(self):
165
+ # GH 11693
166
+ # test for merging NaT series with datetime series.
167
+ x = Series(
168
+ date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern")
169
+ )
170
+ y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
171
+ expected = Series([x[0], x[1], pd.NaT, pd.NaT])
172
+
173
+ result = concat([x, y], ignore_index=True)
174
+ tm.assert_series_equal(result, expected)
175
+
176
+ # all NaT with tz
177
+ expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]")
178
+ result = concat([y, y], ignore_index=True)
179
+ tm.assert_series_equal(result, expected)
180
+
181
+ # without tz
182
+ x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h"))
183
+ y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h"))
184
+ y[:] = pd.NaT
185
+ expected = Series([x[0], x[1], pd.NaT, pd.NaT])
186
+ result = concat([x, y], ignore_index=True)
187
+ tm.assert_series_equal(result, expected)
188
+
189
+ # all NaT without tz
190
+ x[:] = pd.NaT
191
+ expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
192
+ result = concat([x, y], ignore_index=True)
193
+ tm.assert_series_equal(result, expected)
194
+
195
+ @pytest.mark.parametrize("tz", [None, "UTC"])
196
+ def test_concat_NaT_dataframes(self, tz):
197
+ # GH 12396
198
+
199
+ first = DataFrame([[pd.NaT], [pd.NaT]])
200
+ first = first.apply(lambda x: x.dt.tz_localize(tz))
201
+ second = DataFrame(
202
+ [[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]],
203
+ index=[2, 3],
204
+ )
205
+ expected = DataFrame(
206
+ [
207
+ pd.NaT,
208
+ pd.NaT,
209
+ Timestamp("2015/01/01", tz=tz),
210
+ Timestamp("2016/01/01", tz=tz),
211
+ ]
212
+ )
213
+
214
+ result = concat([first, second], axis=0)
215
+ tm.assert_frame_equal(result, expected)
216
+
217
+ @pytest.mark.parametrize("tz1", [None, "UTC"])
218
+ @pytest.mark.parametrize("tz2", [None, "UTC"])
219
+ @pytest.mark.parametrize("s", [pd.NaT, Timestamp("20150101")])
220
+ def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s):
221
+ # GH 12396
222
+
223
+ # tz-naive
224
+ first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1))
225
+ second = DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2))
226
+
227
+ result = concat([first, second], axis=0)
228
+ expected = DataFrame(Series([pd.NaT, pd.NaT, s], index=[0, 1, 0]))
229
+ expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
230
+ if tz1 != tz2:
231
+ expected = expected.astype(object)
232
+
233
+ tm.assert_frame_equal(result, expected)
234
+
235
+ @pytest.mark.parametrize("tz1", [None, "UTC"])
236
+ @pytest.mark.parametrize("tz2", [None, "UTC"])
237
+ def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
238
+ # GH 12396
239
+
240
+ first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
241
+ second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
242
+ expected = DataFrame(
243
+ {
244
+ 0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
245
+ 1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
246
+ }
247
+ )
248
+ result = concat([first, second], axis=1)
249
+ tm.assert_frame_equal(result, expected)
250
+
251
+ @pytest.mark.parametrize("tz1", [None, "UTC"])
252
+ @pytest.mark.parametrize("tz2", [None, "UTC"])
253
+ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
254
+ # GH 12396
255
+
256
+ # tz-naive
257
+ first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
258
+ second = DataFrame(
259
+ [
260
+ [Timestamp("2015/01/01", tz=tz2)],
261
+ [Timestamp("2016/01/01", tz=tz2)],
262
+ ],
263
+ index=[2, 3],
264
+ )
265
+
266
+ expected = DataFrame(
267
+ [
268
+ pd.NaT,
269
+ pd.NaT,
270
+ Timestamp("2015/01/01", tz=tz2),
271
+ Timestamp("2016/01/01", tz=tz2),
272
+ ]
273
+ )
274
+ if tz1 != tz2:
275
+ expected = expected.astype(object)
276
+
277
+ result = concat([first, second])
278
+ tm.assert_frame_equal(result, expected)
279
+
280
+
281
+ class TestTimezoneConcat:
282
+ def test_concat_tz_series(self):
283
+ # gh-11755: tz and no tz
284
+ x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
285
+ y = Series(date_range("2012-01-01", "2012-01-02"))
286
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
287
+ result = concat([x, y], ignore_index=True)
288
+ tm.assert_series_equal(result, expected)
289
+
290
+ # gh-11887: concat tz and object
291
+ x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
292
+ y = Series(["a", "b"])
293
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
294
+ result = concat([x, y], ignore_index=True)
295
+ tm.assert_series_equal(result, expected)
296
+
297
+ # see gh-12217 and gh-12306
298
+ # Concatenating two UTC times
299
+ first = DataFrame([[datetime(2016, 1, 1)]])
300
+ first[0] = first[0].dt.tz_localize("UTC")
301
+
302
+ second = DataFrame([[datetime(2016, 1, 2)]])
303
+ second[0] = second[0].dt.tz_localize("UTC")
304
+
305
+ result = concat([first, second])
306
+ assert result[0].dtype == "datetime64[ns, UTC]"
307
+
308
+ # Concatenating two London times
309
+ first = DataFrame([[datetime(2016, 1, 1)]])
310
+ first[0] = first[0].dt.tz_localize("Europe/London")
311
+
312
+ second = DataFrame([[datetime(2016, 1, 2)]])
313
+ second[0] = second[0].dt.tz_localize("Europe/London")
314
+
315
+ result = concat([first, second])
316
+ assert result[0].dtype == "datetime64[ns, Europe/London]"
317
+
318
+ # Concatenating 2+1 London times
319
+ first = DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]])
320
+ first[0] = first[0].dt.tz_localize("Europe/London")
321
+
322
+ second = DataFrame([[datetime(2016, 1, 3)]])
323
+ second[0] = second[0].dt.tz_localize("Europe/London")
324
+
325
+ result = concat([first, second])
326
+ assert result[0].dtype == "datetime64[ns, Europe/London]"
327
+
328
+ # Concat'ing 1+2 London times
329
+ first = DataFrame([[datetime(2016, 1, 1)]])
330
+ first[0] = first[0].dt.tz_localize("Europe/London")
331
+
332
+ second = DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]])
333
+ second[0] = second[0].dt.tz_localize("Europe/London")
334
+
335
+ result = concat([first, second])
336
+ assert result[0].dtype == "datetime64[ns, Europe/London]"
337
+
338
+ def test_concat_tz_series_tzlocal(self):
339
+ # see gh-13583
340
+ x = [
341
+ Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()),
342
+ Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()),
343
+ ]
344
+ y = [
345
+ Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()),
346
+ Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
347
+ ]
348
+
349
+ result = concat([Series(x), Series(y)], ignore_index=True)
350
+ tm.assert_series_equal(result, Series(x + y))
351
+ assert result.dtype == "datetime64[ns, tzlocal()]"
352
+
353
+ def test_concat_tz_series_with_datetimelike(self):
354
+ # see gh-12620: tz and timedelta
355
+ x = [
356
+ Timestamp("2011-01-01", tz="US/Eastern"),
357
+ Timestamp("2011-02-01", tz="US/Eastern"),
358
+ ]
359
+ y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
360
+ result = concat([Series(x), Series(y)], ignore_index=True)
361
+ tm.assert_series_equal(result, Series(x + y, dtype="object"))
362
+
363
+ # tz and period
364
+ y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
365
+ result = concat([Series(x), Series(y)], ignore_index=True)
366
+ tm.assert_series_equal(result, Series(x + y, dtype="object"))
367
+
368
+ def test_concat_tz_frame(self):
369
+ df2 = DataFrame(
370
+ {
371
+ "A": Timestamp("20130102", tz="US/Eastern"),
372
+ "B": Timestamp("20130603", tz="CET"),
373
+ },
374
+ index=range(5),
375
+ )
376
+
377
+ # concat
378
+ df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
379
+ tm.assert_frame_equal(df2, df3)
380
+
381
+ def test_concat_multiple_tzs(self):
382
+ # GH#12467
383
+ # combining datetime tz-aware and naive DataFrames
384
+ ts1 = Timestamp("2015-01-01", tz=None)
385
+ ts2 = Timestamp("2015-01-01", tz="UTC")
386
+ ts3 = Timestamp("2015-01-01", tz="EST")
387
+
388
+ df1 = DataFrame({"time": [ts1]})
389
+ df2 = DataFrame({"time": [ts2]})
390
+ df3 = DataFrame({"time": [ts3]})
391
+
392
+ results = concat([df1, df2]).reset_index(drop=True)
393
+ expected = DataFrame({"time": [ts1, ts2]}, dtype=object)
394
+ tm.assert_frame_equal(results, expected)
395
+
396
+ results = concat([df1, df3]).reset_index(drop=True)
397
+ expected = DataFrame({"time": [ts1, ts3]}, dtype=object)
398
+ tm.assert_frame_equal(results, expected)
399
+
400
+ results = concat([df2, df3]).reset_index(drop=True)
401
+ expected = DataFrame({"time": [ts2, ts3]})
402
+ tm.assert_frame_equal(results, expected)
403
+
404
+ def test_concat_multiindex_with_tz(self):
405
+ # GH 6606
406
+ df = DataFrame(
407
+ {
408
+ "dt": [
409
+ datetime(2014, 1, 1),
410
+ datetime(2014, 1, 2),
411
+ datetime(2014, 1, 3),
412
+ ],
413
+ "b": ["A", "B", "C"],
414
+ "c": [1, 2, 3],
415
+ "d": [4, 5, 6],
416
+ }
417
+ )
418
+ df["dt"] = df["dt"].apply(lambda d: Timestamp(d, tz="US/Pacific"))
419
+ df = df.set_index(["dt", "b"])
420
+
421
+ exp_idx1 = DatetimeIndex(
422
+ ["2014-01-01", "2014-01-02", "2014-01-03"] * 2, tz="US/Pacific", name="dt"
423
+ )
424
+ exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
425
+ exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
426
+ expected = DataFrame(
427
+ {"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"]
428
+ )
429
+
430
+ result = concat([df, df])
431
+ tm.assert_frame_equal(result, expected)
432
+
433
+ def test_concat_tz_not_aligned(self):
434
+ # GH#22796
435
+ ts = pd.to_datetime([1, 2]).tz_localize("UTC")
436
+ a = DataFrame({"A": ts})
437
+ b = DataFrame({"A": ts, "B": ts})
438
+ result = concat([a, b], sort=True, ignore_index=True)
439
+ expected = DataFrame(
440
+ {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
441
+ )
442
+ tm.assert_frame_equal(result, expected)
443
+
444
+ @pytest.mark.parametrize(
445
+ "t1",
446
+ [
447
+ "2015-01-01",
448
+ pytest.param(
449
+ pd.NaT,
450
+ marks=pytest.mark.xfail(
451
+ reason="GH23037 incorrect dtype when concatenating"
452
+ ),
453
+ ),
454
+ ],
455
+ )
456
+ def test_concat_tz_NaT(self, t1):
457
+ # GH#22796
458
+ # Concatenating tz-aware multicolumn DataFrames
459
+ ts1 = Timestamp(t1, tz="UTC")
460
+ ts2 = Timestamp("2015-01-01", tz="UTC")
461
+ ts3 = Timestamp("2015-01-01", tz="UTC")
462
+
463
+ df1 = DataFrame([[ts1, ts2]])
464
+ df2 = DataFrame([[ts3]])
465
+
466
+ result = concat([df1, df2])
467
+ expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
468
+
469
+ tm.assert_frame_equal(result, expected)
470
+
471
+ def test_concat_tz_with_empty(self):
472
+ # GH 9188
473
+ result = concat(
474
+ [DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()]
475
+ )
476
+ expected = DataFrame(date_range("2000", periods=1, tz="UTC"))
477
+ tm.assert_frame_equal(result, expected)
478
+
479
+
480
+ class TestPeriodConcat:
481
+ def test_concat_period_series(self):
482
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
483
+ y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D"))
484
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]")
485
+ result = concat([x, y], ignore_index=True)
486
+ tm.assert_series_equal(result, expected)
487
+
488
+ def test_concat_period_multiple_freq_series(self):
489
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
490
+ y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M"))
491
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
492
+ result = concat([x, y], ignore_index=True)
493
+ tm.assert_series_equal(result, expected)
494
+ assert result.dtype == "object"
495
+
496
+ def test_concat_period_other_series(self):
497
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
498
+ y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M"))
499
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
500
+ result = concat([x, y], ignore_index=True)
501
+ tm.assert_series_equal(result, expected)
502
+ assert result.dtype == "object"
503
+
504
+ # non-period
505
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
506
+ y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"]))
507
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
508
+ result = concat([x, y], ignore_index=True)
509
+ tm.assert_series_equal(result, expected)
510
+ assert result.dtype == "object"
511
+
512
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
513
+ y = Series(["A", "B"])
514
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
515
+ result = concat([x, y], ignore_index=True)
516
+ tm.assert_series_equal(result, expected)
517
+ assert result.dtype == "object"
518
+
519
+
520
+ def test_concat_timedelta64_block():
521
+ rng = to_timedelta(np.arange(10), unit="s")
522
+
523
+ df = DataFrame({"time": rng})
524
+
525
+ result = concat([df, df])
526
+ tm.assert_frame_equal(result.iloc[:10], df)
527
+ tm.assert_frame_equal(result.iloc[10:], df)
528
+
529
+
530
+ def test_concat_multiindex_datetime_nat():
531
+ # GH#44900
532
+ left = DataFrame({"a": 1}, index=MultiIndex.from_tuples([(1, pd.NaT)]))
533
+ right = DataFrame(
534
+ {"b": 2}, index=MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
535
+ )
536
+ result = concat([left, right], axis="columns")
537
+ expected = DataFrame(
538
+ {"a": [1.0, np.nan], "b": 2}, MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
539
+ )
540
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ RangeIndex,
8
+ Series,
9
+ concat,
10
+ date_range,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ class TestEmptyConcat:
16
+ def test_handle_empty_objects(self, sort):
17
+ df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
18
+
19
+ dfcopy = df[:5].copy()
20
+ dfcopy["foo"] = "bar"
21
+ empty = df[5:5]
22
+
23
+ frames = [dfcopy, empty, empty, df[5:]]
24
+ concatted = concat(frames, axis=0, sort=sort)
25
+
26
+ expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
27
+ expected["foo"] = expected["foo"].astype("O")
28
+ expected.loc[0:4, "foo"] = "bar"
29
+
30
+ tm.assert_frame_equal(concatted, expected)
31
+
32
+ # empty as first element with time series
33
+ # GH3259
34
+ df = DataFrame(
35
+ {"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
36
+ )
37
+ empty = DataFrame()
38
+ result = concat([df, empty], axis=1)
39
+ tm.assert_frame_equal(result, df)
40
+ result = concat([empty, df], axis=1)
41
+ tm.assert_frame_equal(result, df)
42
+
43
+ result = concat([df, empty])
44
+ tm.assert_frame_equal(result, df)
45
+ result = concat([empty, df])
46
+ tm.assert_frame_equal(result, df)
47
+
48
+ def test_concat_empty_series(self):
49
+ # GH 11082
50
+ s1 = Series([1, 2, 3], name="x")
51
+ s2 = Series(name="y", dtype="float64")
52
+ res = concat([s1, s2], axis=1)
53
+ exp = DataFrame(
54
+ {"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
55
+ index=RangeIndex(3),
56
+ )
57
+ tm.assert_frame_equal(res, exp)
58
+
59
+ s1 = Series([1, 2, 3], name="x")
60
+ s2 = Series(name="y", dtype="float64")
61
+ res = concat([s1, s2], axis=0)
62
+ # name will be reset
63
+ exp = Series([1, 2, 3])
64
+ tm.assert_series_equal(res, exp)
65
+
66
+ # empty Series with no name
67
+ s1 = Series([1, 2, 3], name="x")
68
+ s2 = Series(name=None, dtype="float64")
69
+ res = concat([s1, s2], axis=1)
70
+ exp = DataFrame(
71
+ {"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
72
+ columns=["x", 0],
73
+ index=RangeIndex(3),
74
+ )
75
+ tm.assert_frame_equal(res, exp)
76
+
77
+ @pytest.mark.parametrize("tz", [None, "UTC"])
78
+ @pytest.mark.parametrize("values", [[], [1, 2, 3]])
79
+ def test_concat_empty_series_timelike(self, tz, values):
80
+ # GH 18447
81
+
82
+ first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
83
+ dtype = None if values else np.float64
84
+ second = Series(values, dtype=dtype)
85
+
86
+ expected = DataFrame(
87
+ {
88
+ 0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
89
+ 1: values,
90
+ }
91
+ )
92
+ result = concat([first, second], axis=1)
93
+ tm.assert_frame_equal(result, expected)
94
+
95
+ @pytest.mark.parametrize(
96
+ "left,right,expected",
97
+ [
98
+ # booleans
99
+ (np.bool_, np.int32, np.object_), # changed from int32 in 2.0 GH#39817
100
+ (np.bool_, np.float32, np.object_),
101
+ # datetime-like
102
+ ("m8[ns]", np.bool_, np.object_),
103
+ ("m8[ns]", np.int64, np.object_),
104
+ ("M8[ns]", np.bool_, np.object_),
105
+ ("M8[ns]", np.int64, np.object_),
106
+ # categorical
107
+ ("category", "category", "category"),
108
+ ("category", "object", "object"),
109
+ ],
110
+ )
111
+ def test_concat_empty_series_dtypes(self, left, right, expected):
112
+ # GH#39817, GH#45101
113
+ result = concat([Series(dtype=left), Series(dtype=right)])
114
+ assert result.dtype == expected
115
+
116
+ @pytest.mark.parametrize(
117
+ "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
118
+ )
119
+ def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
120
+ dtype = np.dtype(dtype)
121
+
122
+ result = concat([Series(dtype=dtype)])
123
+ assert result.dtype == dtype
124
+
125
+ result = concat([Series(dtype=dtype), Series(dtype=dtype)])
126
+ assert result.dtype == dtype
127
+
128
+ @pytest.mark.parametrize("dtype", ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"])
129
+ @pytest.mark.parametrize(
130
+ "dtype2",
131
+ ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"],
132
+ )
133
+ def test_concat_empty_series_dtypes_roundtrips(self, dtype, dtype2):
134
+ # round-tripping with self & like self
135
+ if dtype == dtype2:
136
+ return
137
+
138
+ def int_result_type(dtype, dtype2):
139
+ typs = {dtype.kind, dtype2.kind}
140
+ if not len(typs - {"i", "u", "b"}) and (
141
+ dtype.kind == "i" or dtype2.kind == "i"
142
+ ):
143
+ return "i"
144
+ elif not len(typs - {"u", "b"}) and (
145
+ dtype.kind == "u" or dtype2.kind == "u"
146
+ ):
147
+ return "u"
148
+ return None
149
+
150
+ def float_result_type(dtype, dtype2):
151
+ typs = {dtype.kind, dtype2.kind}
152
+ if not len(typs - {"f", "i", "u"}) and (
153
+ dtype.kind == "f" or dtype2.kind == "f"
154
+ ):
155
+ return "f"
156
+ return None
157
+
158
+ def get_result_type(dtype, dtype2):
159
+ result = float_result_type(dtype, dtype2)
160
+ if result is not None:
161
+ return result
162
+ result = int_result_type(dtype, dtype2)
163
+ if result is not None:
164
+ return result
165
+ return "O"
166
+
167
+ dtype = np.dtype(dtype)
168
+ dtype2 = np.dtype(dtype2)
169
+ expected = get_result_type(dtype, dtype2)
170
+ result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
171
+ assert result.kind == expected
172
+
173
+ def test_concat_empty_series_dtypes_triple(self):
174
+ assert (
175
+ concat(
176
+ [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
177
+ ).dtype
178
+ == np.object_
179
+ )
180
+
181
+ def test_concat_empty_series_dtype_category_with_array(self):
182
+ # GH#18515
183
+ assert (
184
+ concat(
185
+ [Series(np.array([]), dtype="category"), Series(dtype="float64")]
186
+ ).dtype
187
+ == "float64"
188
+ )
189
+
190
+ def test_concat_empty_series_dtypes_sparse(self):
191
+ result = concat(
192
+ [
193
+ Series(dtype="float64").astype("Sparse"),
194
+ Series(dtype="float64").astype("Sparse"),
195
+ ]
196
+ )
197
+ assert result.dtype == "Sparse[float64]"
198
+
199
+ result = concat(
200
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
201
+ )
202
+ expected = pd.SparseDtype(np.float64)
203
+ assert result.dtype == expected
204
+
205
+ result = concat(
206
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
207
+ )
208
+ expected = pd.SparseDtype("object")
209
+ assert result.dtype == expected
210
+
211
+ def test_concat_empty_df_object_dtype(self):
212
+ # GH 9149
213
+ df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
214
+ df_2 = DataFrame(columns=df_1.columns)
215
+ result = concat([df_1, df_2], axis=0)
216
+ expected = df_1.astype(object)
217
+ tm.assert_frame_equal(result, expected)
218
+
219
+ def test_concat_empty_dataframe_dtypes(self):
220
+ df = DataFrame(columns=list("abc"))
221
+ df["a"] = df["a"].astype(np.bool_)
222
+ df["b"] = df["b"].astype(np.int32)
223
+ df["c"] = df["c"].astype(np.float64)
224
+
225
+ result = concat([df, df])
226
+ assert result["a"].dtype == np.bool_
227
+ assert result["b"].dtype == np.int32
228
+ assert result["c"].dtype == np.float64
229
+
230
+ result = concat([df, df.astype(np.float64)])
231
+ assert result["a"].dtype == np.object_
232
+ assert result["b"].dtype == np.float64
233
+ assert result["c"].dtype == np.float64
234
+
235
+ def test_concat_inner_join_empty(self):
236
+ # GH 15328
237
+ df_empty = DataFrame()
238
+ df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
239
+ df_expected = DataFrame({"a": []}, index=RangeIndex(0), dtype="int64")
240
+
241
+ for how, expected in [("inner", df_expected), ("outer", df_a)]:
242
+ result = concat([df_a, df_empty], axis=1, join=how)
243
+ tm.assert_frame_equal(result, expected)
244
+
245
+ def test_empty_dtype_coerce(self):
246
+ # xref to #12411
247
+ # xref to #12045
248
+ # xref to #11594
249
+ # see below
250
+
251
+ # 10571
252
+ df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
253
+ df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
254
+ result = concat([df1, df2])
255
+ expected = df1.dtypes
256
+ tm.assert_series_equal(result.dtypes, expected)
257
+
258
+ def test_concat_empty_dataframe(self):
259
+ # 39037
260
+ df1 = DataFrame(columns=["a", "b"])
261
+ df2 = DataFrame(columns=["b", "c"])
262
+ result = concat([df1, df2, df1])
263
+ expected = DataFrame(columns=["a", "b", "c"])
264
+ tm.assert_frame_equal(result, expected)
265
+
266
+ df3 = DataFrame(columns=["a", "b"])
267
+ df4 = DataFrame(columns=["b"])
268
+ result = concat([df3, df4])
269
+ expected = DataFrame(columns=["a", "b"])
270
+ tm.assert_frame_equal(result, expected)
271
+
272
+ def test_concat_empty_dataframe_different_dtypes(self):
273
+ # 39037
274
+ df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
275
+ df2 = DataFrame({"a": [1, 2, 3]})
276
+
277
+ result = concat([df1[:0], df2[:0]])
278
+ assert result["a"].dtype == np.int64
279
+ assert result["b"].dtype == np.object_
280
+
281
+ def test_concat_to_empty_ea(self):
282
+ """48510 `concat` to an empty EA should maintain type EA dtype."""
283
+ df_empty = DataFrame({"a": pd.array([], dtype=pd.Int64Dtype())})
284
+ df_new = DataFrame({"a": pd.array([1, 2, 3], dtype=pd.Int64Dtype())})
285
+ expected = df_new.copy()
286
+ result = concat([df_empty, df_new])
287
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from copy import deepcopy
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas.errors import PerformanceWarning
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Index,
12
+ MultiIndex,
13
+ Series,
14
+ concat,
15
+ )
16
+ import pandas._testing as tm
17
+
18
+
19
+ class TestIndexConcat:
20
+ def test_concat_ignore_index(self, sort):
21
+ frame1 = DataFrame(
22
+ {"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
23
+ )
24
+ frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
25
+ frame1.index = Index(["x", "y", "z"])
26
+ frame2.index = Index(["x", "y", "q"])
27
+
28
+ v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
29
+
30
+ nan = np.nan
31
+ expected = DataFrame(
32
+ [
33
+ [nan, nan, nan, 4.3],
34
+ ["a", 1, 4.5, 5.2],
35
+ ["b", 2, 3.2, 2.2],
36
+ ["c", 3, 1.2, nan],
37
+ ],
38
+ index=Index(["q", "x", "y", "z"]),
39
+ )
40
+ if not sort:
41
+ expected = expected.loc[["x", "y", "z", "q"]]
42
+
43
+ tm.assert_frame_equal(v1, expected)
44
+
45
+ @pytest.mark.parametrize(
46
+ "name_in1,name_in2,name_in3,name_out",
47
+ [
48
+ ("idx", "idx", "idx", "idx"),
49
+ ("idx", "idx", None, None),
50
+ ("idx", None, None, None),
51
+ ("idx1", "idx2", None, None),
52
+ ("idx1", "idx1", "idx2", None),
53
+ ("idx1", "idx2", "idx3", None),
54
+ (None, None, None, None),
55
+ ],
56
+ )
57
+ def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
58
+ # GH13475
59
+ indices = [
60
+ Index(["a", "b", "c"], name=name_in1),
61
+ Index(["b", "c", "d"], name=name_in2),
62
+ Index(["c", "d", "e"], name=name_in3),
63
+ ]
64
+ frames = [
65
+ DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
66
+ ]
67
+ result = concat(frames, axis=1)
68
+
69
+ exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
70
+ expected = DataFrame(
71
+ {
72
+ "x": [0, 1, 2, np.nan, np.nan],
73
+ "y": [np.nan, 0, 1, 2, np.nan],
74
+ "z": [np.nan, np.nan, 0, 1, 2],
75
+ },
76
+ index=exp_ind,
77
+ )
78
+
79
+ tm.assert_frame_equal(result, expected)
80
+
81
+ def test_concat_rename_index(self):
82
+ a = DataFrame(
83
+ np.random.rand(3, 3),
84
+ columns=list("ABC"),
85
+ index=Index(list("abc"), name="index_a"),
86
+ )
87
+ b = DataFrame(
88
+ np.random.rand(3, 3),
89
+ columns=list("ABC"),
90
+ index=Index(list("abc"), name="index_b"),
91
+ )
92
+
93
+ result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
94
+
95
+ exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
96
+ names = list(exp.index.names)
97
+ names[1] = "lvl1"
98
+ exp.index.set_names(names, inplace=True)
99
+
100
+ tm.assert_frame_equal(result, exp)
101
+ assert result.index.names == exp.index.names
102
+
103
+ def test_concat_copy_index_series(self, axis, using_copy_on_write):
104
+ # GH 29879
105
+ ser = Series([1, 2])
106
+ comb = concat([ser, ser], axis=axis, copy=True)
107
+ if not using_copy_on_write or axis in [0, "index"]:
108
+ assert comb.index is not ser.index
109
+ else:
110
+ assert comb.index is ser.index
111
+
112
+ def test_concat_copy_index_frame(self, axis, using_copy_on_write):
113
+ # GH 29879
114
+ df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
115
+ comb = concat([df, df], axis=axis, copy=True)
116
+ if not using_copy_on_write:
117
+ assert comb.index is not df.index
118
+ assert comb.columns is not df.columns
119
+ elif axis in [0, "index"]:
120
+ assert comb.index is not df.index
121
+ assert comb.columns is df.columns
122
+ elif axis in [1, "columns"]:
123
+ assert comb.index is df.index
124
+ assert comb.columns is not df.columns
125
+
126
+ def test_default_index(self):
127
+ # is_series and ignore_index
128
+ s1 = Series([1, 2, 3], name="x")
129
+ s2 = Series([4, 5, 6], name="y")
130
+ res = concat([s1, s2], axis=1, ignore_index=True)
131
+ assert isinstance(res.columns, pd.RangeIndex)
132
+ exp = DataFrame([[1, 4], [2, 5], [3, 6]])
133
+ # use check_index_type=True to check the result have
134
+ # RangeIndex (default index)
135
+ tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
136
+
137
+ # is_series and all inputs have no names
138
+ s1 = Series([1, 2, 3])
139
+ s2 = Series([4, 5, 6])
140
+ res = concat([s1, s2], axis=1, ignore_index=False)
141
+ assert isinstance(res.columns, pd.RangeIndex)
142
+ exp = DataFrame([[1, 4], [2, 5], [3, 6]])
143
+ exp.columns = pd.RangeIndex(2)
144
+ tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
145
+
146
+ # is_dataframe and ignore_index
147
+ df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
148
+ df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
149
+
150
+ res = concat([df1, df2], axis=0, ignore_index=True)
151
+ exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
152
+ tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
153
+
154
+ res = concat([df1, df2], axis=1, ignore_index=True)
155
+ exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
156
+ tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
157
+
158
+ def test_dups_index(self):
159
+ # GH 4771
160
+
161
+ # single dtypes
162
+ df = DataFrame(
163
+ np.random.randint(0, 10, size=40).reshape(10, 4),
164
+ columns=["A", "A", "C", "C"],
165
+ )
166
+
167
+ result = concat([df, df], axis=1)
168
+ tm.assert_frame_equal(result.iloc[:, :4], df)
169
+ tm.assert_frame_equal(result.iloc[:, 4:], df)
170
+
171
+ result = concat([df, df], axis=0)
172
+ tm.assert_frame_equal(result.iloc[:10], df)
173
+ tm.assert_frame_equal(result.iloc[10:], df)
174
+
175
+ # multi dtypes
176
+ df = concat(
177
+ [
178
+ DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
179
+ DataFrame(
180
+ np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
181
+ ),
182
+ ],
183
+ axis=1,
184
+ )
185
+
186
+ result = concat([df, df], axis=1)
187
+ tm.assert_frame_equal(result.iloc[:, :6], df)
188
+ tm.assert_frame_equal(result.iloc[:, 6:], df)
189
+
190
+ result = concat([df, df], axis=0)
191
+ tm.assert_frame_equal(result.iloc[:10], df)
192
+ tm.assert_frame_equal(result.iloc[10:], df)
193
+
194
+ # append
195
+ result = df.iloc[0:8, :]._append(df.iloc[8:])
196
+ tm.assert_frame_equal(result, df)
197
+
198
+ result = df.iloc[0:8, :]._append(df.iloc[8:9])._append(df.iloc[9:10])
199
+ tm.assert_frame_equal(result, df)
200
+
201
+ expected = concat([df, df], axis=0)
202
+ result = df._append(df)
203
+ tm.assert_frame_equal(result, expected)
204
+
205
+
206
+ class TestMultiIndexConcat:
207
+ def test_concat_multiindex_with_keys(self, multiindex_dataframe_random_data):
208
+ frame = multiindex_dataframe_random_data
209
+ index = frame.index
210
+ result = concat([frame, frame], keys=[0, 1], names=["iteration"])
211
+
212
+ assert result.index.names == ("iteration",) + index.names
213
+ tm.assert_frame_equal(result.loc[0], frame)
214
+ tm.assert_frame_equal(result.loc[1], frame)
215
+ assert result.index.nlevels == 3
216
+
217
+ def test_concat_multiindex_with_none_in_index_names(self):
218
+ # GH 15787
219
+ index = MultiIndex.from_product([[1], range(5)], names=["level1", None])
220
+ df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
221
+
222
+ result = concat([df, df], keys=[1, 2], names=["level2"])
223
+ index = MultiIndex.from_product(
224
+ [[1, 2], [1], range(5)], names=["level2", "level1", None]
225
+ )
226
+ expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
227
+ tm.assert_frame_equal(result, expected)
228
+
229
+ result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
230
+ level2 = [1] * 5 + [2] * 2
231
+ level1 = [1] * 7
232
+ no_name = list(range(5)) + list(range(2))
233
+ tuples = list(zip(level2, level1, no_name))
234
+ index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
235
+ expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
236
+ tm.assert_frame_equal(result, expected)
237
+
238
+ def test_concat_multiindex_rangeindex(self):
239
+ # GH13542
240
+ # when multi-index levels are RangeIndex objects
241
+ # there is a bug in concat with objects of len 1
242
+
243
+ df = DataFrame(np.random.randn(9, 2))
244
+ df.index = MultiIndex(
245
+ levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
246
+ codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
247
+ )
248
+
249
+ res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
250
+ exp = df.iloc[[2, 3, 4, 5], :]
251
+ tm.assert_frame_equal(res, exp)
252
+
253
+ def test_concat_multiindex_dfs_with_deepcopy(self):
254
+ # GH 9967
255
+ example_multiindex1 = MultiIndex.from_product([["a"], ["b"]])
256
+ example_dataframe1 = DataFrame([0], index=example_multiindex1)
257
+
258
+ example_multiindex2 = MultiIndex.from_product([["a"], ["c"]])
259
+ example_dataframe2 = DataFrame([1], index=example_multiindex2)
260
+
261
+ example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
262
+ expected_index = MultiIndex(
263
+ levels=[["s1", "s2"], ["a"], ["b", "c"]],
264
+ codes=[[0, 1], [0, 0], [0, 1]],
265
+ names=["testname", None, None],
266
+ )
267
+ expected = DataFrame([[0], [1]], index=expected_index)
268
+ result_copy = concat(deepcopy(example_dict), names=["testname"])
269
+ tm.assert_frame_equal(result_copy, expected)
270
+ result_no_copy = concat(example_dict, names=["testname"])
271
+ tm.assert_frame_equal(result_no_copy, expected)
272
+
273
+ @pytest.mark.parametrize(
274
+ "mi1_list",
275
+ [
276
+ [["a"], range(2)],
277
+ [["b"], np.arange(2.0, 4.0)],
278
+ [["c"], ["A", "B"]],
279
+ [["d"], pd.date_range(start="2017", end="2018", periods=2)],
280
+ ],
281
+ )
282
+ @pytest.mark.parametrize(
283
+ "mi2_list",
284
+ [
285
+ [["a"], range(2)],
286
+ [["b"], np.arange(2.0, 4.0)],
287
+ [["c"], ["A", "B"]],
288
+ [["d"], pd.date_range(start="2017", end="2018", periods=2)],
289
+ ],
290
+ )
291
+ def test_concat_with_various_multiindex_dtypes(
292
+ self, mi1_list: list, mi2_list: list
293
+ ):
294
+ # GitHub #23478
295
+ mi1 = MultiIndex.from_product(mi1_list)
296
+ mi2 = MultiIndex.from_product(mi2_list)
297
+
298
+ df1 = DataFrame(np.zeros((1, len(mi1))), columns=mi1)
299
+ df2 = DataFrame(np.zeros((1, len(mi2))), columns=mi2)
300
+
301
+ if mi1_list[0] == mi2_list[0]:
302
+ expected_mi = MultiIndex(
303
+ levels=[mi1_list[0], list(mi1_list[1])],
304
+ codes=[[0, 0, 0, 0], [0, 1, 0, 1]],
305
+ )
306
+ else:
307
+ expected_mi = MultiIndex(
308
+ levels=[
309
+ mi1_list[0] + mi2_list[0],
310
+ list(mi1_list[1]) + list(mi2_list[1]),
311
+ ],
312
+ codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
313
+ )
314
+
315
+ expected_df = DataFrame(np.zeros((1, len(expected_mi))), columns=expected_mi)
316
+
317
+ with tm.assert_produces_warning(None):
318
+ result_df = concat((df1, df2), axis=1)
319
+
320
+ tm.assert_frame_equal(expected_df, result_df)
321
+
322
+ def test_concat_multiindex_(self):
323
+ # GitHub #44786
324
+ df = DataFrame({"col": ["a", "b", "c"]}, index=["1", "2", "2"])
325
+ df = concat([df], keys=["X"])
326
+
327
+ iterables = [["X"], ["1", "2", "2"]]
328
+ result_index = df.index
329
+ expected_index = MultiIndex.from_product(iterables)
330
+
331
+ tm.assert_index_equal(result_index, expected_index)
332
+
333
+ result_df = df
334
+ expected_df = DataFrame(
335
+ {"col": ["a", "b", "c"]}, index=MultiIndex.from_product(iterables)
336
+ )
337
+ tm.assert_frame_equal(result_df, expected_df)
338
+
339
+ def test_concat_with_key_not_unique(self):
340
+ # GitHub #46519
341
+ df1 = DataFrame({"name": [1]})
342
+ df2 = DataFrame({"name": [2]})
343
+ df3 = DataFrame({"name": [3]})
344
+ df_a = concat([df1, df2, df3], keys=["x", "y", "x"])
345
+ # the warning is caused by indexing unsorted multi-index
346
+ with tm.assert_produces_warning(
347
+ PerformanceWarning, match="indexing past lexsort depth"
348
+ ):
349
+ out_a = df_a.loc[("x", 0), :]
350
+
351
+ df_b = DataFrame(
352
+ {"name": [1, 2, 3]}, index=Index([("x", 0), ("y", 0), ("x", 0)])
353
+ )
354
+ with tm.assert_produces_warning(
355
+ PerformanceWarning, match="indexing past lexsort depth"
356
+ ):
357
+ out_b = df_b.loc[("x", 0)]
358
+
359
+ tm.assert_frame_equal(out_a, out_b)
360
+
361
+ df1 = DataFrame({"name": ["a", "a", "b"]})
362
+ df2 = DataFrame({"name": ["a", "b"]})
363
+ df3 = DataFrame({"name": ["c", "d"]})
364
+ df_a = concat([df1, df2, df3], keys=["x", "y", "x"])
365
+ with tm.assert_produces_warning(
366
+ PerformanceWarning, match="indexing past lexsort depth"
367
+ ):
368
+ out_a = df_a.loc[("x", 0), :]
369
+
370
+ df_b = DataFrame(
371
+ {
372
+ "a": ["x", "x", "x", "y", "y", "x", "x"],
373
+ "b": [0, 1, 2, 0, 1, 0, 1],
374
+ "name": list("aababcd"),
375
+ }
376
+ ).set_index(["a", "b"])
377
+ df_b.index.names = [None, None]
378
+ with tm.assert_produces_warning(
379
+ PerformanceWarning, match="indexing past lexsort depth"
380
+ ):
381
+ out_b = df_b.loc[("x", 0), :]
382
+
383
+ tm.assert_frame_equal(out_a, out_b)
384
+
385
+ def test_concat_with_duplicated_levels(self):
386
+ # keyword levels should be unique
387
+ df1 = DataFrame({"A": [1]}, index=["x"])
388
+ df2 = DataFrame({"A": [1]}, index=["y"])
389
+ msg = r"Level values not unique: \['x', 'y', 'y'\]"
390
+ with pytest.raises(ValueError, match=msg):
391
+ concat([df1, df2], keys=["x", "y"], levels=[["x", "y", "y"]])
392
+
393
+ @pytest.mark.parametrize("levels", [[["x", "y"]], [["x", "y", "y"]]])
394
+ def test_concat_with_levels_with_none_keys(self, levels):
395
+ df1 = DataFrame({"A": [1]}, index=["x"])
396
+ df2 = DataFrame({"A": [1]}, index=["y"])
397
+ msg = "levels supported only when keys is not None"
398
+ with pytest.raises(ValueError, match=msg):
399
+ concat([df1, df2], levels=levels)
400
+
401
+ def test_concat_range_index_result(self):
402
+ # GH#47501
403
+ df1 = DataFrame({"a": [1, 2]})
404
+ df2 = DataFrame({"b": [1, 2]})
405
+
406
+ result = concat([df1, df2], sort=True, axis=1)
407
+ expected = DataFrame({"a": [1, 2], "b": [1, 2]})
408
+ tm.assert_frame_equal(result, expected)
409
+ expected_index = pd.RangeIndex(0, 2)
410
+ tm.assert_index_equal(result.index, expected_index, exact=True)
411
+
412
+ def test_concat_index_keep_dtype(self):
413
+ # GH#47329
414
+ df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object"))
415
+ df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object"))
416
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
417
+ expected = DataFrame(
418
+ [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object")
419
+ )
420
+ tm.assert_frame_equal(result, expected)
421
+
422
+ def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype):
423
+ # GH#47329
424
+ df1 = DataFrame(
425
+ [[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype)
426
+ )
427
+ df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype))
428
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
429
+ expected = DataFrame(
430
+ [[0, 1, 1.0], [0, 1, np.nan]],
431
+ columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype),
432
+ )
433
+ tm.assert_frame_equal(result, expected)
434
+
435
+ @pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"])
436
+ def test_concat_index_find_common(self, dtype):
437
+ # GH#47329
438
+ df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype))
439
+ df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32"))
440
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
441
+ expected = DataFrame(
442
+ [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32")
443
+ )
444
+ tm.assert_frame_equal(result, expected)
445
+
446
+ def test_concat_axis_1_sort_false_rangeindex(self):
447
+ # GH 46675
448
+ s1 = Series(["a", "b", "c"])
449
+ s2 = Series(["a", "b"])
450
+ s3 = Series(["a", "b", "c", "d"])
451
+ s4 = Series([], dtype=object)
452
+ result = concat(
453
+ [s1, s2, s3, s4], sort=False, join="outer", ignore_index=False, axis=1
454
+ )
455
+ expected = DataFrame(
456
+ [
457
+ ["a"] * 3 + [np.nan],
458
+ ["b"] * 3 + [np.nan],
459
+ ["c", np.nan] * 2,
460
+ [np.nan] * 2 + ["d"] + [np.nan],
461
+ ],
462
+ dtype=object,
463
+ )
464
+ tm.assert_frame_equal(
465
+ result, expected, check_index_type=True, check_column_type=True
466
+ )
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ concat,
9
+ read_csv,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
class TestInvalidConcat:
    """Tests that ``pd.concat`` rejects invalid inputs but accepts iterables."""

    def test_concat_invalid(self):
        # trying to concat a ndframe with a non-ndframe
        df1 = tm.makeCustomDataframe(10, 2)
        for obj in [1, {}, [1, 2], (1, 2)]:
            msg = (
                f"cannot concatenate object of type '{type(obj)}'; "
                "only Series and DataFrame objs are valid"
            )
            with pytest.raises(TypeError, match=msg):
                concat([df1, obj])

    def test_concat_invalid_first_argument(self):
        df1 = tm.makeCustomDataframe(10, 2)
        msg = (
            "first argument must be an iterable of pandas "
            'objects, you passed an object of type "DataFrame"'
        )
        with pytest.raises(TypeError, match=msg):
            concat(df1)

    def test_concat_generator_obj(self):
        # generator ok though
        concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))

    def test_concat_textreader_obj(self):
        # text reader ok
        # GH6583
        data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""

        with read_csv(StringIO(data), chunksize=1) as reader:
            result = concat(reader, ignore_index=True)
        expected = read_csv(StringIO(data))
        tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ DataFrame,
6
+ DatetimeIndex,
7
+ Index,
8
+ MultiIndex,
9
+ Series,
10
+ concat,
11
+ date_range,
12
+ )
13
+ import pandas._testing as tm
14
+
15
+
16
+ class TestSeriesConcat:
17
+ def test_concat_series(self):
18
+ ts = tm.makeTimeSeries()
19
+ ts.name = "foo"
20
+
21
+ pieces = [ts[:5], ts[5:15], ts[15:]]
22
+
23
+ result = concat(pieces)
24
+ tm.assert_series_equal(result, ts)
25
+ assert result.name == ts.name
26
+
27
+ result = concat(pieces, keys=[0, 1, 2])
28
+ expected = ts.copy()
29
+
30
+ ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
31
+
32
+ exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
33
+ exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
34
+ expected.index = exp_index
35
+ tm.assert_series_equal(result, expected)
36
+
37
+ def test_concat_empty_and_non_empty_series_regression(self):
38
+ # GH 18187 regression test
39
+ s1 = Series([1])
40
+ s2 = Series([], dtype=object)
41
+
42
+ expected = s1
43
+ result = concat([s1, s2])
44
+ tm.assert_series_equal(result, expected)
45
+
46
+ def test_concat_series_axis1(self):
47
+ ts = tm.makeTimeSeries()
48
+
49
+ pieces = [ts[:-2], ts[2:], ts[2:-2]]
50
+
51
+ result = concat(pieces, axis=1)
52
+ expected = DataFrame(pieces).T
53
+ tm.assert_frame_equal(result, expected)
54
+
55
+ result = concat(pieces, keys=["A", "B", "C"], axis=1)
56
+ expected = DataFrame(pieces, index=["A", "B", "C"]).T
57
+ tm.assert_frame_equal(result, expected)
58
+
59
+ def test_concat_series_axis1_preserves_series_names(self):
60
+ # preserve series names, #2489
61
+ s = Series(np.random.randn(5), name="A")
62
+ s2 = Series(np.random.randn(5), name="B")
63
+
64
+ result = concat([s, s2], axis=1)
65
+ expected = DataFrame({"A": s, "B": s2})
66
+ tm.assert_frame_equal(result, expected)
67
+
68
+ s2.name = None
69
+ result = concat([s, s2], axis=1)
70
+ tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
71
+
72
+ def test_concat_series_axis1_with_reindex(self, sort):
73
+ # must reindex, #2603
74
+ s = Series(np.random.randn(3), index=["c", "a", "b"], name="A")
75
+ s2 = Series(np.random.randn(4), index=["d", "a", "b", "c"], name="B")
76
+ result = concat([s, s2], axis=1, sort=sort)
77
+ expected = DataFrame({"A": s, "B": s2}, index=["c", "a", "b", "d"])
78
+ if sort:
79
+ expected = expected.sort_index()
80
+ tm.assert_frame_equal(result, expected)
81
+
82
+ def test_concat_series_axis1_names_applied(self):
83
+ # ensure names argument is not ignored on axis=1, #23490
84
+ s = Series([1, 2, 3])
85
+ s2 = Series([4, 5, 6])
86
+ result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
87
+ expected = DataFrame(
88
+ [[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
89
+ )
90
+ tm.assert_frame_equal(result, expected)
91
+
92
+ result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
93
+ expected = DataFrame(
94
+ [[1, 4], [2, 5], [3, 6]],
95
+ columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
96
+ )
97
+ tm.assert_frame_equal(result, expected)
98
+
99
+ def test_concat_series_axis1_same_names_ignore_index(self):
100
+ dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
101
+ s1 = Series(np.random.randn(len(dates)), index=dates, name="value")
102
+ s2 = Series(np.random.randn(len(dates)), index=dates, name="value")
103
+
104
+ result = concat([s1, s2], axis=1, ignore_index=True)
105
+ expected = Index(range(2))
106
+
107
+ tm.assert_index_equal(result.columns, expected, exact=True)
108
+
109
+ @pytest.mark.parametrize(
110
+ "s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))]
111
+ )
112
+ def test_concat_series_name_npscalar_tuple(self, s1name, s2name):
113
+ # GH21015
114
+ s1 = Series({"a": 1, "b": 2}, name=s1name)
115
+ s2 = Series({"c": 5, "d": 6}, name=s2name)
116
+ result = concat([s1, s2])
117
+ expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
118
+ tm.assert_series_equal(result, expected)
119
+
120
+ def test_concat_series_partial_columns_names(self):
121
+ # GH10698
122
+ named_series = Series([1, 2], name="foo")
123
+ unnamed_series1 = Series([1, 2])
124
+ unnamed_series2 = Series([4, 5])
125
+
126
+ result = concat([named_series, unnamed_series1, unnamed_series2], axis=1)
127
+ expected = DataFrame(
128
+ {"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
129
+ )
130
+ tm.assert_frame_equal(result, expected)
131
+
132
+ result = concat(
133
+ [named_series, unnamed_series1, unnamed_series2],
134
+ axis=1,
135
+ keys=["red", "blue", "yellow"],
136
+ )
137
+ expected = DataFrame(
138
+ {"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
139
+ columns=["red", "blue", "yellow"],
140
+ )
141
+ tm.assert_frame_equal(result, expected)
142
+
143
+ result = concat(
144
+ [named_series, unnamed_series1, unnamed_series2], axis=1, ignore_index=True
145
+ )
146
+ expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
147
+ tm.assert_frame_equal(result, expected)
148
+
149
+ def test_concat_series_length_one_reversed(self, frame_or_series):
150
+ # GH39401
151
+ obj = frame_or_series([100])
152
+ result = concat([obj.iloc[::-1]])
153
+ tm.assert_equal(result, obj)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import DataFrame
6
+ import pandas._testing as tm
7
+
8
+
9
+ class TestConcatSort:
10
+ def test_concat_sorts_columns(self, sort):
11
+ # GH-4588
12
+ df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
13
+ df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
14
+
15
+ # for sort=True/None
16
+ expected = DataFrame(
17
+ {"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
18
+ columns=["a", "b", "c"],
19
+ )
20
+
21
+ if sort is False:
22
+ expected = expected[["b", "a", "c"]]
23
+
24
+ # default
25
+ with tm.assert_produces_warning(None):
26
+ result = pd.concat([df1, df2], ignore_index=True, sort=sort)
27
+ tm.assert_frame_equal(result, expected)
28
+
29
+ def test_concat_sorts_index(self, sort):
30
+ df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
31
+ df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
32
+
33
+ # For True/None
34
+ expected = DataFrame(
35
+ {"a": [2, 3, 1], "b": [1, 2, None]},
36
+ index=["a", "b", "c"],
37
+ columns=["a", "b"],
38
+ )
39
+ if sort is False:
40
+ expected = expected.loc[["c", "a", "b"]]
41
+
42
+ # Warn and sort by default
43
+ with tm.assert_produces_warning(None):
44
+ result = pd.concat([df1, df2], axis=1, sort=sort)
45
+ tm.assert_frame_equal(result, expected)
46
+
47
+ def test_concat_inner_sort(self, sort):
48
+ # https://github.com/pandas-dev/pandas/pull/20613
49
+ df1 = DataFrame(
50
+ {"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"]
51
+ )
52
+ df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
53
+
54
+ with tm.assert_produces_warning(None):
55
+ # unset sort should *not* warn for inner join
56
+ # since that never sorted
57
+ result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
58
+
59
+ expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
60
+ if sort is True:
61
+ expected = expected[["a", "b"]]
62
+ tm.assert_frame_equal(result, expected)
63
+
64
+ def test_concat_aligned_sort(self):
65
+ # GH-4588
66
+ df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
67
+ result = pd.concat([df, df], sort=True, ignore_index=True)
68
+ expected = DataFrame(
69
+ {"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
70
+ columns=["a", "b", "c"],
71
+ )
72
+ tm.assert_frame_equal(result, expected)
73
+
74
+ result = pd.concat(
75
+ [df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True
76
+ )
77
+ expected = expected[["b", "c"]]
78
+ tm.assert_frame_equal(result, expected)
79
+
80
+ def test_concat_aligned_sort_does_not_raise(self):
81
+ # GH-4588
82
+ # We catch TypeErrors from sorting internally and do not re-raise.
83
+ df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
84
+ expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
85
+ result = pd.concat([df, df], ignore_index=True, sort=True)
86
+ tm.assert_frame_equal(result, expected)
87
+
88
+ def test_concat_frame_with_sort_false(self):
89
+ # GH 43375
90
+ result = pd.concat(
91
+ [DataFrame({i: i}, index=[i]) for i in range(2, 0, -1)], sort=False
92
+ )
93
+ expected = DataFrame([[2, np.nan], [np.nan, 1]], index=[2, 1], columns=[2, 1])
94
+
95
+ tm.assert_frame_equal(result, expected)
96
+
97
+ # GH 37937
98
+ df1 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[1, 2, 3])
99
+ df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}, index=[3, 1, 6])
100
+ result = pd.concat([df2, df1], axis=1, sort=False)
101
+ expected = DataFrame(
102
+ [
103
+ [7.0, 10.0, 3.0, 6.0],
104
+ [8.0, 11.0, 1.0, 4.0],
105
+ [9.0, 12.0, np.nan, np.nan],
106
+ [np.nan, np.nan, 2.0, 5.0],
107
+ ],
108
+ index=[3, 1, 6, 2],
109
+ columns=["c", "d", "a", "b"],
110
+ )
111
+ tm.assert_frame_equal(result, expected)
112
+
113
+ def test_concat_sort_none_raises(self):
114
+ # GH#41518
115
+ df = DataFrame({1: [1, 2], "a": [3, 4]})
116
+ msg = "The 'sort' keyword only accepts boolean values; None was passed."
117
+ with pytest.raises(ValueError, match=msg):
118
+ pd.concat([df, df], sort=None)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_join.cpython-310.pyc ADDED
Binary file (28.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge.cpython-310.pyc ADDED
Binary file (70.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_cross.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_index_as_string.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_ordered.cpython-310.pyc ADDED
Binary file (5.78 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_multi.cpython-310.pyc ADDED
Binary file (19.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py ADDED
@@ -0,0 +1,994 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ Categorical,
7
+ DataFrame,
8
+ Index,
9
+ MultiIndex,
10
+ Series,
11
+ Timestamp,
12
+ concat,
13
+ merge,
14
+ )
15
+ import pandas._testing as tm
16
+
17
+
18
def get_test_data(ngroups=8, n=50):
    """Return a shuffled length-*n* integer array drawn from *ngroups* group labels.

    The labels ``0..ngroups-1`` are tiled to fill *n* slots; when ``n`` is not a
    multiple of ``ngroups`` the tile is padded with a prefix of the label list so
    the result always has exactly *n* elements. Shuffling uses the global NumPy
    RNG, so the order is nondeterministic across calls.
    """
    unique_groups = list(range(ngroups))
    arr = np.asarray(np.tile(unique_groups, n // ngroups))

    # pad with the first labels when n is not an exact multiple of ngroups
    if len(arr) < n:
        arr = np.asarray(list(arr) + unique_groups[: n - len(arr)])

    np.random.shuffle(arr)
    return arr
27
+
28
+
29
+ class TestJoin:
30
+ # aggregate multiple columns
31
+ @pytest.fixture
32
+ def df(self):
33
+ df = DataFrame(
34
+ {
35
+ "key1": get_test_data(),
36
+ "key2": get_test_data(),
37
+ "data1": np.random.randn(50),
38
+ "data2": np.random.randn(50),
39
+ }
40
+ )
41
+
42
+ # exclude a couple keys for fun
43
+ df = df[df["key2"] > 1]
44
+ return df
45
+
46
+ @pytest.fixture
47
+ def df2(self):
48
+ return DataFrame(
49
+ {
50
+ "key1": get_test_data(n=10),
51
+ "key2": get_test_data(ngroups=4, n=10),
52
+ "value": np.random.randn(10),
53
+ }
54
+ )
55
+
56
+ @pytest.fixture
57
+ def target_source(self):
58
+ index, data = tm.getMixedTypeDict()
59
+ target = DataFrame(data, index=index)
60
+
61
+ # Join on string value
62
+
63
+ source = DataFrame(
64
+ {"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"]
65
+ )
66
+ return target, source
67
+
68
+ def test_left_outer_join(self, df, df2):
69
+ joined_key2 = merge(df, df2, on="key2")
70
+ _check_join(df, df2, joined_key2, ["key2"], how="left")
71
+
72
+ joined_both = merge(df, df2)
73
+ _check_join(df, df2, joined_both, ["key1", "key2"], how="left")
74
+
75
+ def test_right_outer_join(self, df, df2):
76
+ joined_key2 = merge(df, df2, on="key2", how="right")
77
+ _check_join(df, df2, joined_key2, ["key2"], how="right")
78
+
79
+ joined_both = merge(df, df2, how="right")
80
+ _check_join(df, df2, joined_both, ["key1", "key2"], how="right")
81
+
82
+ def test_full_outer_join(self, df, df2):
83
+ joined_key2 = merge(df, df2, on="key2", how="outer")
84
+ _check_join(df, df2, joined_key2, ["key2"], how="outer")
85
+
86
+ joined_both = merge(df, df2, how="outer")
87
+ _check_join(df, df2, joined_both, ["key1", "key2"], how="outer")
88
+
89
+ def test_inner_join(self, df, df2):
90
+ joined_key2 = merge(df, df2, on="key2", how="inner")
91
+ _check_join(df, df2, joined_key2, ["key2"], how="inner")
92
+
93
+ joined_both = merge(df, df2, how="inner")
94
+ _check_join(df, df2, joined_both, ["key1", "key2"], how="inner")
95
+
96
+ def test_handle_overlap(self, df, df2):
97
+ joined = merge(df, df2, on="key2", suffixes=(".foo", ".bar"))
98
+
99
+ assert "key1.foo" in joined
100
+ assert "key1.bar" in joined
101
+
102
+ def test_handle_overlap_arbitrary_key(self, df, df2):
103
+ joined = merge(
104
+ df,
105
+ df2,
106
+ left_on="key2",
107
+ right_on="key1",
108
+ suffixes=(".foo", ".bar"),
109
+ )
110
+ assert "key1.foo" in joined
111
+ assert "key2.bar" in joined
112
+
113
+ def test_join_on(self, target_source):
114
+ target, source = target_source
115
+
116
+ merged = target.join(source, on="C")
117
+ tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False)
118
+ tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False)
119
+
120
+ # join with duplicates (fix regression from DataFrame/Matrix merge)
121
+ df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
122
+ df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
123
+ joined = df.join(df2, on="key")
124
+ expected = DataFrame(
125
+ {"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]}
126
+ )
127
+ tm.assert_frame_equal(joined, expected)
128
+
129
+ # Test when some are missing
130
+ df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"])
131
+ df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"])
132
+ df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"])
133
+ joined = df_a.join(df_b, on="one")
134
+ joined = joined.join(df_c, on="one")
135
+ assert np.isnan(joined["two"]["c"])
136
+ assert np.isnan(joined["three"]["c"])
137
+
138
+ # merge column not p resent
139
+ with pytest.raises(KeyError, match="^'E'$"):
140
+ target.join(source, on="E")
141
+
142
+ # overlap
143
+ source_copy = source.copy()
144
+ source_copy["A"] = 0
145
+ msg = (
146
+ "You are trying to merge on float64 and object columns. If "
147
+ "you wish to proceed you should use pd.concat"
148
+ )
149
+ with pytest.raises(ValueError, match=msg):
150
+ target.join(source_copy, on="A")
151
+
152
+ def test_join_on_fails_with_different_right_index(self):
153
+ df = DataFrame(
154
+ {"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
155
+ )
156
+ df2 = DataFrame(
157
+ {"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
158
+ index=tm.makeCustomIndex(10, 2),
159
+ )
160
+ msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
161
+ with pytest.raises(ValueError, match=msg):
162
+ merge(df, df2, left_on="a", right_index=True)
163
+
164
+ def test_join_on_fails_with_different_left_index(self):
165
+ df = DataFrame(
166
+ {"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)},
167
+ index=tm.makeCustomIndex(3, 2),
168
+ )
169
+ df2 = DataFrame(
170
+ {"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)}
171
+ )
172
+ msg = r'len\(right_on\) must equal the number of levels in the index of "left"'
173
+ with pytest.raises(ValueError, match=msg):
174
+ merge(df, df2, right_on="b", left_index=True)
175
+
176
+ def test_join_on_fails_with_different_column_counts(self):
177
+ df = DataFrame(
178
+ {"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
179
+ )
180
+ df2 = DataFrame(
181
+ {"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
182
+ index=tm.makeCustomIndex(10, 2),
183
+ )
184
+ msg = r"len\(right_on\) must equal len\(left_on\)"
185
+ with pytest.raises(ValueError, match=msg):
186
+ merge(df, df2, right_on="a", left_on=["a", "b"])
187
+
188
+ @pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])])
189
+ def test_join_on_fails_with_wrong_object_type(self, wrong_type):
190
+ # GH12081 - original issue
191
+
192
+ # GH21220 - merging of Series and DataFrame is now allowed
193
+ # Edited test to remove the Series object from test parameters
194
+
195
+ df = DataFrame({"a": [1, 1]})
196
+ msg = (
197
+ "Can only merge Series or DataFrame objects, "
198
+ f"a {type(wrong_type)} was passed"
199
+ )
200
+ with pytest.raises(TypeError, match=msg):
201
+ merge(wrong_type, df, left_on="a", right_on="a")
202
+ with pytest.raises(TypeError, match=msg):
203
+ merge(df, wrong_type, left_on="a", right_on="a")
204
+
205
+ def test_join_on_pass_vector(self, target_source):
206
+ target, source = target_source
207
+ expected = target.join(source, on="C")
208
+ del expected["C"]
209
+
210
+ join_col = target.pop("C")
211
+ result = target.join(source, on=join_col)
212
+ tm.assert_frame_equal(result, expected)
213
+
214
+ def test_join_with_len0(self, target_source):
215
+ # nothing to merge
216
+ target, source = target_source
217
+ merged = target.join(source.reindex([]), on="C")
218
+ for col in source:
219
+ assert col in merged
220
+ assert merged[col].isna().all()
221
+
222
+ merged2 = target.join(source.reindex([]), on="C", how="inner")
223
+ tm.assert_index_equal(merged2.columns, merged.columns)
224
+ assert len(merged2) == 0
225
+
226
+ def test_join_on_inner(self):
227
+ df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]})
228
+ df2 = DataFrame({"value": [0, 1]}, index=["a", "b"])
229
+
230
+ joined = df.join(df2, on="key", how="inner")
231
+
232
+ expected = df.join(df2, on="key")
233
+ expected = expected[expected["value"].notna()]
234
+ tm.assert_series_equal(joined["key"], expected["key"])
235
+ tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False)
236
+ tm.assert_index_equal(joined.index, expected.index)
237
+
238
+ def test_join_on_singlekey_list(self):
239
+ df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
240
+ df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
241
+
242
+ # corner cases
243
+ joined = df.join(df2, on=["key"])
244
+ expected = df.join(df2, on="key")
245
+
246
+ tm.assert_frame_equal(joined, expected)
247
+
248
+ def test_join_on_series(self, target_source):
249
+ target, source = target_source
250
+ result = target.join(source["MergedA"], on="C")
251
+ expected = target.join(source[["MergedA"]], on="C")
252
+ tm.assert_frame_equal(result, expected)
253
+
254
+ def test_join_on_series_buglet(self):
255
+ # GH #638
256
+ df = DataFrame({"a": [1, 1]})
257
+ ds = Series([2], index=[1], name="b")
258
+ result = df.join(ds, on="a")
259
+ expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index)
260
+ tm.assert_frame_equal(result, expected)
261
+
262
+ def test_join_index_mixed(self, join_type):
263
+ # no overlapping blocks
264
+ df1 = DataFrame(index=np.arange(10))
265
+ df1["bool"] = True
266
+ df1["string"] = "foo"
267
+
268
+ df2 = DataFrame(index=np.arange(5, 15))
269
+ df2["int"] = 1
270
+ df2["float"] = 1.0
271
+
272
+ joined = df1.join(df2, how=join_type)
273
+ expected = _join_by_hand(df1, df2, how=join_type)
274
+ tm.assert_frame_equal(joined, expected)
275
+
276
+ joined = df2.join(df1, how=join_type)
277
+ expected = _join_by_hand(df2, df1, how=join_type)
278
+ tm.assert_frame_equal(joined, expected)
279
+
280
+ def test_join_index_mixed_overlap(self):
281
+ df1 = DataFrame(
282
+ {"A": 1.0, "B": 2, "C": "foo", "D": True},
283
+ index=np.arange(10),
284
+ columns=["A", "B", "C", "D"],
285
+ )
286
+ assert df1["B"].dtype == np.int64
287
+ assert df1["D"].dtype == np.bool_
288
+
289
+ df2 = DataFrame(
290
+ {"A": 1.0, "B": 2, "C": "foo", "D": True},
291
+ index=np.arange(0, 10, 2),
292
+ columns=["A", "B", "C", "D"],
293
+ )
294
+
295
+ # overlap
296
+ joined = df1.join(df2, lsuffix="_one", rsuffix="_two")
297
+ expected_columns = [
298
+ "A_one",
299
+ "B_one",
300
+ "C_one",
301
+ "D_one",
302
+ "A_two",
303
+ "B_two",
304
+ "C_two",
305
+ "D_two",
306
+ ]
307
+ df1.columns = expected_columns[:4]
308
+ df2.columns = expected_columns[4:]
309
+ expected = _join_by_hand(df1, df2)
310
+ tm.assert_frame_equal(joined, expected)
311
+
312
+ def test_join_empty_bug(self):
313
+ # generated an exception in 0.4.3
314
+ x = DataFrame()
315
+ x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
316
+
317
+ def test_join_unconsolidated(self):
318
+ # GH #331
319
+ a = DataFrame(np.random.randn(30, 2), columns=["a", "b"])
320
+ c = Series(np.random.randn(30))
321
+ a["c"] = c
322
+ d = DataFrame(np.random.randn(30, 1), columns=["q"])
323
+
324
+ # it works!
325
+ a.join(d)
326
+ d.join(a)
327
+
328
+ def test_join_multiindex(self):
329
+ index1 = MultiIndex.from_arrays(
330
+ [["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]],
331
+ names=["first", "second"],
332
+ )
333
+
334
+ index2 = MultiIndex.from_arrays(
335
+ [["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]],
336
+ names=["first", "second"],
337
+ )
338
+
339
+ df1 = DataFrame(data=np.random.randn(6), index=index1, columns=["var X"])
340
+ df2 = DataFrame(data=np.random.randn(6), index=index2, columns=["var Y"])
341
+
342
+ df1 = df1.sort_index(level=0)
343
+ df2 = df2.sort_index(level=0)
344
+
345
+ joined = df1.join(df2, how="outer")
346
+ ex_index = Index(index1.values).union(Index(index2.values))
347
+ expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
348
+ expected.index.names = index1.names
349
+ tm.assert_frame_equal(joined, expected)
350
+ assert joined.index.names == index1.names
351
+
352
+ df1 = df1.sort_index(level=1)
353
+ df2 = df2.sort_index(level=1)
354
+
355
+ joined = df1.join(df2, how="outer").sort_index(level=0)
356
+ ex_index = Index(index1.values).union(Index(index2.values))
357
+ expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
358
+ expected.index.names = index1.names
359
+
360
+ tm.assert_frame_equal(joined, expected)
361
+ assert joined.index.names == index1.names
362
+
363
+ def test_join_inner_multiindex(self, lexsorted_two_level_string_multiindex):
364
+ key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
365
+ key2 = [
366
+ "two",
367
+ "one",
368
+ "three",
369
+ "one",
370
+ "two",
371
+ "one",
372
+ "two",
373
+ "two",
374
+ "three",
375
+ "one",
376
+ ]
377
+
378
+ data = np.random.randn(len(key1))
379
+ data = DataFrame({"key1": key1, "key2": key2, "data": data})
380
+
381
+ index = lexsorted_two_level_string_multiindex
382
+ to_join = DataFrame(
383
+ np.random.randn(10, 3), index=index, columns=["j_one", "j_two", "j_three"]
384
+ )
385
+
386
+ joined = data.join(to_join, on=["key1", "key2"], how="inner")
387
+ expected = merge(
388
+ data,
389
+ to_join.reset_index(),
390
+ left_on=["key1", "key2"],
391
+ right_on=["first", "second"],
392
+ how="inner",
393
+ sort=False,
394
+ )
395
+
396
+ expected2 = merge(
397
+ to_join,
398
+ data,
399
+ right_on=["key1", "key2"],
400
+ left_index=True,
401
+ how="inner",
402
+ sort=False,
403
+ )
404
+ tm.assert_frame_equal(joined, expected2.reindex_like(joined))
405
+
406
+ expected2 = merge(
407
+ to_join,
408
+ data,
409
+ right_on=["key1", "key2"],
410
+ left_index=True,
411
+ how="inner",
412
+ sort=False,
413
+ )
414
+
415
+ expected = expected.drop(["first", "second"], axis=1)
416
+ expected.index = joined.index
417
+
418
+ assert joined.index.is_monotonic_increasing
419
+ tm.assert_frame_equal(joined, expected)
420
+
421
+ # _assert_same_contents(expected, expected2.loc[:, expected.columns])
422
+
423
+ def test_join_hierarchical_mixed_raises(self):
424
+ # GH 2024
425
+ # GH 40993: For raising, enforced in 2.0
426
+ df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"])
427
+ new_df = df.groupby(["a"]).agg({"b": [np.mean, np.sum]})
428
+ other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"])
429
+ other_df.set_index("a", inplace=True)
430
+ # GH 9455, 12219
431
+ with pytest.raises(
432
+ pd.errors.MergeError, match="Not allowed to merge between different levels"
433
+ ):
434
+ merge(new_df, other_df, left_index=True, right_index=True)
435
+
436
+ def test_join_float64_float32(self):
437
+ a = DataFrame(np.random.randn(10, 2), columns=["a", "b"], dtype=np.float64)
438
+ b = DataFrame(np.random.randn(10, 1), columns=["c"], dtype=np.float32)
439
+ joined = a.join(b)
440
+ assert joined.dtypes["a"] == "float64"
441
+ assert joined.dtypes["b"] == "float64"
442
+ assert joined.dtypes["c"] == "float32"
443
+
444
+ a = np.random.randint(0, 5, 100).astype("int64")
445
+ b = np.random.random(100).astype("float64")
446
+ c = np.random.random(100).astype("float32")
447
+ df = DataFrame({"a": a, "b": b, "c": c})
448
+ xpdf = DataFrame({"a": a, "b": b, "c": c})
449
+ s = DataFrame(np.random.random(5).astype("float32"), columns=["md"])
450
+ rs = df.merge(s, left_on="a", right_index=True)
451
+ assert rs.dtypes["a"] == "int64"
452
+ assert rs.dtypes["b"] == "float64"
453
+ assert rs.dtypes["c"] == "float32"
454
+ assert rs.dtypes["md"] == "float32"
455
+
456
+ xp = xpdf.merge(s, left_on="a", right_index=True)
457
+ tm.assert_frame_equal(rs, xp)
458
+
459
+ def test_join_many_non_unique_index(self):
460
+ df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
461
+ df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
462
+ df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
463
+ idf1 = df1.set_index(["a", "b"])
464
+ idf2 = df2.set_index(["a", "b"])
465
+ idf3 = df3.set_index(["a", "b"])
466
+
467
+ result = idf1.join([idf2, idf3], how="outer")
468
+
469
+ df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer")
470
+ expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer")
471
+
472
+ result = result.reset_index()
473
+ expected = expected[result.columns]
474
+ expected["a"] = expected.a.astype("int64")
475
+ expected["b"] = expected.b.astype("int64")
476
+ tm.assert_frame_equal(result, expected)
477
+
478
+ df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
479
+ df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
480
+ df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
481
+ idf1 = df1.set_index(["a", "b"])
482
+ idf2 = df2.set_index(["a", "b"])
483
+ idf3 = df3.set_index(["a", "b"])
484
+ result = idf1.join([idf2, idf3], how="inner")
485
+
486
+ df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner")
487
+ expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner")
488
+
489
+ result = result.reset_index()
490
+
491
+ tm.assert_frame_equal(result, expected.loc[:, result.columns])
492
+
493
+ # GH 11519
494
+ df = DataFrame(
495
+ {
496
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
497
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
498
+ "C": np.random.randn(8),
499
+ "D": np.random.randn(8),
500
+ }
501
+ )
502
+ s = Series(
503
+ np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST"
504
+ )
505
+ inner = df.join(s, how="inner")
506
+ outer = df.join(s, how="outer")
507
+ left = df.join(s, how="left")
508
+ right = df.join(s, how="right")
509
+ tm.assert_frame_equal(inner, outer)
510
+ tm.assert_frame_equal(inner, left)
511
+ tm.assert_frame_equal(inner, right)
512
+
513
+ def test_join_sort(self):
514
+ left = DataFrame({"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
515
+ right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
516
+
517
+ joined = left.join(right, on="key", sort=True)
518
+ expected = DataFrame(
519
+ {
520
+ "key": ["bar", "baz", "foo", "foo"],
521
+ "value": [2, 3, 1, 4],
522
+ "value2": ["a", "b", "c", "c"],
523
+ },
524
+ index=[1, 2, 0, 3],
525
+ )
526
+ tm.assert_frame_equal(joined, expected)
527
+
528
+ # smoke test
529
+ joined = left.join(right, on="key", sort=False)
530
+ tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
531
+
532
+ def test_join_mixed_non_unique_index(self):
533
+ # GH 12814, unorderable types in py3 with a non-unique index
534
+ df1 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 3, "a"])
535
+ df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4])
536
+ result = df1.join(df2)
537
+ expected = DataFrame(
538
+ {"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]},
539
+ index=[1, 2, 3, 3, "a"],
540
+ )
541
+ tm.assert_frame_equal(result, expected)
542
+
543
+ df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"])
544
+ df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4])
545
+ result = df3.join(df4)
546
+ expected = DataFrame(
547
+ {"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"]
548
+ )
549
+ tm.assert_frame_equal(result, expected)
550
+
551
+ def test_join_non_unique_period_index(self):
552
+ # GH #16871
553
+ index = pd.period_range("2016-01-01", periods=16, freq="M")
554
+ df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
555
+ df2 = concat([df, df])
556
+ result = df.join(df2, how="inner", rsuffix="_df2")
557
+ expected = DataFrame(
558
+ np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
559
+ columns=["pnum", "pnum_df2"],
560
+ index=df2.sort_index().index,
561
+ )
562
+ tm.assert_frame_equal(result, expected)
563
+
564
+ def test_mixed_type_join_with_suffix(self):
565
+ # GH #916
566
+ df = DataFrame(np.random.randn(20, 6), columns=["a", "b", "c", "d", "e", "f"])
567
+ df.insert(0, "id", 0)
568
+ df.insert(5, "dt", "foo")
569
+
570
+ grouped = df.groupby("id")
571
+ with pytest.raises(TypeError, match="Could not convert"):
572
+ grouped.mean()
573
+ mn = grouped.mean(numeric_only=True)
574
+ cn = grouped.count()
575
+
576
+ # it works!
577
+ mn.join(cn, rsuffix="_right")
578
+
579
+ def test_join_many(self):
580
+ df = DataFrame(np.random.randn(10, 6), columns=list("abcdef"))
581
+ df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]]
582
+
583
+ joined = df_list[0].join(df_list[1:])
584
+ tm.assert_frame_equal(joined, df)
585
+
586
+ df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]]
587
+
588
+ def _check_diff_index(df_list, result, exp_index):
589
+ reindexed = [x.reindex(exp_index) for x in df_list]
590
+ expected = reindexed[0].join(reindexed[1:])
591
+ tm.assert_frame_equal(result, expected)
592
+
593
+ # different join types
594
+ joined = df_list[0].join(df_list[1:], how="outer")
595
+ _check_diff_index(df_list, joined, df.index)
596
+
597
+ joined = df_list[0].join(df_list[1:])
598
+ _check_diff_index(df_list, joined, df_list[0].index)
599
+
600
+ joined = df_list[0].join(df_list[1:], how="inner")
601
+ _check_diff_index(df_list, joined, df.index[2:8])
602
+
603
+ msg = "Joining multiple DataFrames only supported for joining on index"
604
+ with pytest.raises(ValueError, match=msg):
605
+ df_list[0].join(df_list[1:], on="a")
606
+
607
+ def test_join_many_mixed(self):
608
+ df = DataFrame(np.random.randn(8, 4), columns=["A", "B", "C", "D"])
609
+ df["key"] = ["foo", "bar"] * 4
610
+ df1 = df.loc[:, ["A", "B"]]
611
+ df2 = df.loc[:, ["C", "D"]]
612
+ df3 = df.loc[:, ["key"]]
613
+
614
+ result = df1.join([df2, df3])
615
+ tm.assert_frame_equal(result, df)
616
+
617
+ def test_join_dups(self):
618
+ # joining dups
619
+ df = concat(
620
+ [
621
+ DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
622
+ DataFrame(
623
+ np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
624
+ ),
625
+ ],
626
+ axis=1,
627
+ )
628
+
629
+ expected = concat([df, df], axis=1)
630
+ result = df.join(df, rsuffix="_2")
631
+ result.columns = expected.columns
632
+ tm.assert_frame_equal(result, expected)
633
+
634
+ # GH 4975, invalid join on dups
635
+ w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
636
+ x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
637
+ y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
638
+ z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
639
+
640
+ dta = x.merge(y, left_index=True, right_index=True).merge(
641
+ z, left_index=True, right_index=True, how="outer"
642
+ )
643
+ # GH 40991: As of 2.0 causes duplicate columns
644
+ with pytest.raises(
645
+ pd.errors.MergeError,
646
+ match="Passing 'suffixes' which cause duplicate columns",
647
+ ):
648
+ dta.merge(w, left_index=True, right_index=True)
649
+
650
+ def test_join_multi_to_multi(self, join_type):
651
+ # GH 20475
652
+ leftindex = MultiIndex.from_product(
653
+ [list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"]
654
+ )
655
+ left = DataFrame({"v1": range(12)}, index=leftindex)
656
+
657
+ rightindex = MultiIndex.from_product(
658
+ [list("abc"), list("xy")], names=["abc", "xy"]
659
+ )
660
+ right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
661
+
662
+ result = left.join(right, on=["abc", "xy"], how=join_type)
663
+ expected = (
664
+ left.reset_index()
665
+ .merge(right.reset_index(), on=["abc", "xy"], how=join_type)
666
+ .set_index(["abc", "xy", "num"])
667
+ )
668
+ tm.assert_frame_equal(expected, result)
669
+
670
+ msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
671
+ with pytest.raises(ValueError, match=msg):
672
+ left.join(right, on="xy", how=join_type)
673
+
674
+ with pytest.raises(ValueError, match=msg):
675
+ right.join(left, on=["abc", "xy"], how=join_type)
676
+
677
+ def test_join_on_tz_aware_datetimeindex(self):
678
+ # GH 23931, 26335
679
+ df1 = DataFrame(
680
+ {
681
+ "date": pd.date_range(
682
+ start="2018-01-01", periods=5, tz="America/Chicago"
683
+ ),
684
+ "vals": list("abcde"),
685
+ }
686
+ )
687
+
688
+ df2 = DataFrame(
689
+ {
690
+ "date": pd.date_range(
691
+ start="2018-01-03", periods=5, tz="America/Chicago"
692
+ ),
693
+ "vals_2": list("tuvwx"),
694
+ }
695
+ )
696
+ result = df1.join(df2.set_index("date"), on="date")
697
+ expected = df1.copy()
698
+ expected["vals_2"] = Series([np.nan] * 2 + list("tuv"), dtype=object)
699
+ tm.assert_frame_equal(result, expected)
700
+
701
+ def test_join_datetime_string(self):
702
+ # GH 5647
703
+ dfa = DataFrame(
704
+ [
705
+ ["2012-08-02", "L", 10],
706
+ ["2012-08-02", "J", 15],
707
+ ["2013-04-06", "L", 20],
708
+ ["2013-04-06", "J", 25],
709
+ ],
710
+ columns=["x", "y", "a"],
711
+ )
712
+ dfa["x"] = pd.to_datetime(dfa["x"])
713
+ dfb = DataFrame(
714
+ [["2012-08-02", "J", 1], ["2013-04-06", "L", 2]],
715
+ columns=["x", "y", "z"],
716
+ index=[2, 4],
717
+ )
718
+ dfb["x"] = pd.to_datetime(dfb["x"])
719
+ result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"])
720
+ expected = DataFrame(
721
+ [
722
+ [Timestamp("2012-08-02 00:00:00"), "J", 1, 15],
723
+ [Timestamp("2013-04-06 00:00:00"), "L", 2, 20],
724
+ ],
725
+ index=[2, 4],
726
+ columns=["x", "y", "z", "a"],
727
+ )
728
+ tm.assert_frame_equal(result, expected)
729
+
730
+ def test_join_with_categorical_index(self):
731
+ # GH47812
732
+ ix = ["a", "b"]
733
+ id1 = pd.CategoricalIndex(ix, categories=ix)
734
+ id2 = pd.CategoricalIndex(reversed(ix), categories=reversed(ix))
735
+
736
+ df1 = DataFrame({"c1": ix}, index=id1)
737
+ df2 = DataFrame({"c2": reversed(ix)}, index=id2)
738
+ result = df1.join(df2)
739
+ expected = DataFrame(
740
+ {"c1": ["a", "b"], "c2": ["a", "b"]},
741
+ index=pd.CategoricalIndex(["a", "b"], categories=["a", "b"]),
742
+ )
743
+ tm.assert_frame_equal(result, expected)
744
+
745
+
746
+ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"):
747
+ # some smoke tests
748
+ for c in join_col:
749
+ assert result[c].notna().all()
750
+
751
+ left_grouped = left.groupby(join_col)
752
+ right_grouped = right.groupby(join_col)
753
+
754
+ for group_key, group in result.groupby(
755
+ join_col if len(join_col) > 1 else join_col[0]
756
+ ):
757
+ l_joined = _restrict_to_columns(group, left.columns, lsuffix)
758
+ r_joined = _restrict_to_columns(group, right.columns, rsuffix)
759
+
760
+ try:
761
+ lgroup = left_grouped.get_group(group_key)
762
+ except KeyError as err:
763
+ if how in ("left", "inner"):
764
+ raise AssertionError(
765
+ f"key {group_key} should not have been in the join"
766
+ ) from err
767
+
768
+ _assert_all_na(l_joined, left.columns, join_col)
769
+ else:
770
+ _assert_same_contents(l_joined, lgroup)
771
+
772
+ try:
773
+ rgroup = right_grouped.get_group(group_key)
774
+ except KeyError as err:
775
+ if how in ("right", "inner"):
776
+ raise AssertionError(
777
+ f"key {group_key} should not have been in the join"
778
+ ) from err
779
+
780
+ _assert_all_na(r_joined, right.columns, join_col)
781
+ else:
782
+ _assert_same_contents(r_joined, rgroup)
783
+
784
+
785
+ def _restrict_to_columns(group, columns, suffix):
786
+ found = [
787
+ c for c in group.columns if c in columns or c.replace(suffix, "") in columns
788
+ ]
789
+
790
+ # filter
791
+ group = group.loc[:, found]
792
+
793
+ # get rid of suffixes, if any
794
+ group = group.rename(columns=lambda x: x.replace(suffix, ""))
795
+
796
+ # put in the right order...
797
+ group = group.loc[:, columns]
798
+
799
+ return group
800
+
801
+
802
+ def _assert_same_contents(join_chunk, source):
803
+ NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
804
+
805
+ jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
806
+ svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
807
+
808
+ rows = {tuple(row) for row in jvalues}
809
+ assert len(rows) == len(source)
810
+ assert all(tuple(row) in rows for row in svalues)
811
+
812
+
813
+ def _assert_all_na(join_chunk, source_columns, join_col):
814
+ for c in source_columns:
815
+ if c in join_col:
816
+ continue
817
+ assert join_chunk[c].isna().all()
818
+
819
+
820
+ def _join_by_hand(a, b, how="left"):
821
+ join_index = a.index.join(b.index, how=how)
822
+
823
+ a_re = a.reindex(join_index)
824
+ b_re = b.reindex(join_index)
825
+
826
+ result_columns = a.columns.append(b.columns)
827
+
828
+ for col, s in b_re.items():
829
+ a_re[col] = s
830
+ return a_re.reindex(columns=result_columns)
831
+
832
+
833
+ def test_join_inner_multiindex_deterministic_order():
834
+ # GH: 36910
835
+ left = DataFrame(
836
+ data={"e": 5},
837
+ index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")),
838
+ )
839
+ right = DataFrame(
840
+ data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c"))
841
+ )
842
+ result = left.join(right, how="inner")
843
+ expected = DataFrame(
844
+ {"e": [5], "f": [6]},
845
+ index=MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")),
846
+ )
847
+ tm.assert_frame_equal(result, expected)
848
+
849
+
850
+ @pytest.mark.parametrize(
851
+ ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
852
+ )
853
+ def test_join_cross(input_col, output_cols):
854
+ # GH#5401
855
+ left = DataFrame({"a": [1, 3]})
856
+ right = DataFrame({input_col: [3, 4]})
857
+ result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y")
858
+ expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
859
+ tm.assert_frame_equal(result, expected)
860
+
861
+
862
+ def test_join_multiindex_one_level(join_type):
863
+ # GH#36909
864
+ left = DataFrame(
865
+ data={"c": 3}, index=MultiIndex.from_tuples([(1, 2)], names=("a", "b"))
866
+ )
867
+ right = DataFrame(data={"d": 4}, index=MultiIndex.from_tuples([(2,)], names=("b",)))
868
+ result = left.join(right, how=join_type)
869
+ expected = DataFrame(
870
+ {"c": [3], "d": [4]},
871
+ index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]),
872
+ )
873
+ tm.assert_frame_equal(result, expected)
874
+
875
+
876
+ @pytest.mark.parametrize(
877
+ "categories, values",
878
+ [
879
+ (["Y", "X"], ["Y", "X", "X"]),
880
+ ([2, 1], [2, 1, 1]),
881
+ ([2.5, 1.5], [2.5, 1.5, 1.5]),
882
+ (
883
+ [Timestamp("2020-12-31"), Timestamp("2019-12-31")],
884
+ [Timestamp("2020-12-31"), Timestamp("2019-12-31"), Timestamp("2019-12-31")],
885
+ ),
886
+ ],
887
+ )
888
+ def test_join_multiindex_not_alphabetical_categorical(categories, values):
889
+ # GH#38502
890
+ left = DataFrame(
891
+ {
892
+ "first": ["A", "A"],
893
+ "second": Categorical(categories, categories=categories),
894
+ "value": [1, 2],
895
+ }
896
+ ).set_index(["first", "second"])
897
+ right = DataFrame(
898
+ {
899
+ "first": ["A", "A", "B"],
900
+ "second": Categorical(values, categories=categories),
901
+ "value": [3, 4, 5],
902
+ }
903
+ ).set_index(["first", "second"])
904
+ result = left.join(right, lsuffix="_left", rsuffix="_right")
905
+
906
+ expected = DataFrame(
907
+ {
908
+ "first": ["A", "A"],
909
+ "second": Categorical(categories, categories=categories),
910
+ "value_left": [1, 2],
911
+ "value_right": [3, 4],
912
+ }
913
+ ).set_index(["first", "second"])
914
+ tm.assert_frame_equal(result, expected)
915
+
916
+
917
+ @pytest.mark.parametrize(
918
+ "left_empty, how, exp",
919
+ [
920
+ (False, "left", "left"),
921
+ (False, "right", "empty"),
922
+ (False, "inner", "empty"),
923
+ (False, "outer", "left"),
924
+ (False, "cross", "empty"),
925
+ (True, "left", "empty"),
926
+ (True, "right", "right"),
927
+ (True, "inner", "empty"),
928
+ (True, "outer", "right"),
929
+ (True, "cross", "empty"),
930
+ ],
931
+ )
932
+ def test_join_empty(left_empty, how, exp):
933
+ left = DataFrame({"A": [2, 1], "B": [3, 4]}, dtype="int64").set_index("A")
934
+ right = DataFrame({"A": [1], "C": [5]}, dtype="int64").set_index("A")
935
+
936
+ if left_empty:
937
+ left = left.head(0)
938
+ else:
939
+ right = right.head(0)
940
+
941
+ result = left.join(right, how=how)
942
+
943
+ if exp == "left":
944
+ expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]})
945
+ expected = expected.set_index("A")
946
+ elif exp == "right":
947
+ expected = DataFrame({"B": [np.nan], "A": [1], "C": [5]})
948
+ expected = expected.set_index("A")
949
+ elif exp == "empty":
950
+ expected = DataFrame(columns=["B", "C"], dtype="int64")
951
+ if how != "cross":
952
+ expected = expected.rename_axis("A")
953
+
954
+ tm.assert_frame_equal(result, expected)
955
+
956
+
957
+ @pytest.mark.parametrize(
958
+ "how, values",
959
+ [
960
+ ("inner", [0, 1, 2]),
961
+ ("outer", [0, 1, 2]),
962
+ ("left", [0, 1, 2]),
963
+ ("right", [0, 2, 1]),
964
+ ],
965
+ )
966
+ def test_join_multiindex_categorical_output_index_dtype(how, values):
967
+ # GH#50906
968
+ df1 = DataFrame(
969
+ {
970
+ "a": Categorical([0, 1, 2]),
971
+ "b": Categorical([0, 1, 2]),
972
+ "c": [0, 1, 2],
973
+ }
974
+ ).set_index(["a", "b"])
975
+
976
+ df2 = DataFrame(
977
+ {
978
+ "a": Categorical([0, 2, 1]),
979
+ "b": Categorical([0, 2, 1]),
980
+ "d": [0, 2, 1],
981
+ }
982
+ ).set_index(["a", "b"])
983
+
984
+ expected = DataFrame(
985
+ {
986
+ "a": Categorical(values),
987
+ "b": Categorical(values),
988
+ "c": values,
989
+ "d": values,
990
+ }
991
+ ).set_index(["a", "b"])
992
+
993
+ result = df1.join(df2, how=how)
994
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py ADDED
@@ -0,0 +1,2781 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date,
3
+ datetime,
4
+ timedelta,
5
+ )
6
+ import re
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas.core.dtypes.common import (
12
+ is_categorical_dtype,
13
+ is_object_dtype,
14
+ )
15
+ from pandas.core.dtypes.dtypes import CategoricalDtype
16
+
17
+ import pandas as pd
18
+ from pandas import (
19
+ Categorical,
20
+ CategoricalIndex,
21
+ DataFrame,
22
+ DatetimeIndex,
23
+ Index,
24
+ IntervalIndex,
25
+ MultiIndex,
26
+ PeriodIndex,
27
+ RangeIndex,
28
+ Series,
29
+ TimedeltaIndex,
30
+ )
31
+ import pandas._testing as tm
32
+ from pandas.api.types import CategoricalDtype as CDT
33
+ from pandas.core.reshape.concat import concat
34
+ from pandas.core.reshape.merge import (
35
+ MergeError,
36
+ merge,
37
+ )
38
+
39
+
40
def get_test_data(ngroups=8, n=50):
    """
    Return a length-``n`` integer array of group labels drawn from
    ``range(ngroups)``.

    Labels are tiled as evenly as possible, topped up with the leading
    labels when ``n`` is not a multiple of ``ngroups``, then shuffled in
    place — so the multiset of values is deterministic but the order is
    random.
    """
    labels = list(range(ngroups))
    out = np.asarray(np.tile(labels, n // ngroups))

    shortfall = n - len(out)
    if shortfall > 0:
        out = np.asarray(list(out) + labels[:shortfall])

    np.random.shuffle(out)
    return out
49
+
50
+
51
def get_series():
    """Return one-element Series spanning the dtypes exercised as merge keys."""
    ts_naive = pd.Timestamp("2018-01-01")
    ts_aware = pd.Timestamp("2018-01-01", tz="US/Eastern")
    return [
        Series([1], dtype="int64"),
        Series([1], dtype="Int64"),
        Series([1.23]),
        Series(["foo"]),
        Series([True]),
        Series([ts_naive]),
        Series([ts_aware]),
    ]
61
+
62
+
63
def get_series_na():
    """Return one-element all-NA Series across several dtypes."""
    out = [Series([np.nan], dtype=dtype) for dtype in ("Int64", "float", "object")]
    out.append(Series([pd.NaT]))
    return out
70
+
71
+
72
# Parametrized over get_series(); test ids are the dtype names.
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype(request):
    """
    A parametrized fixture returning a variety of Series of different
    dtypes
    """
    return request.param
79
+
80
+
81
# Same params as series_of_dtype so one test can cross two dtype axes.
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype2(request):
    """
    A duplicate of the series_of_dtype fixture, so that it can be used
    twice by a single function
    """
    return request.param
88
+
89
+
90
# Parametrized over get_series_na(); every value is NA for its dtype.
@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name)
def series_of_dtype_all_na(request):
    """
    A parametrized fixture returning a variety of Series with all NA
    values
    """
    return request.param
97
+
98
+
99
@pytest.fixture
def dfs_for_indicator():
    """
    Pair of frames used by the indicator tests: they share 'col1'
    (overlapping only on the value 1) plus a conflicting column name.
    """
    left = DataFrame({"col1": [0, 1], "col_conflict": [1, 2], "col_left": ["a", "b"]})
    right = DataFrame(
        {
            "col1": [1, 2, 3, 4, 5],
            "col_conflict": [1, 2, 3, 4, 5],
            "col_right": [2, 2, 2, 2, 2],
        }
    )
    return left, right
110
+
111
+
112
+ class TestMerge:
113
    @pytest.fixture
    def df(self):
        # 50-row frame with two integer key columns (from get_test_data) and
        # two random float value columns; rows with key2 <= 1 are dropped so
        # the key sets of df/df2 only partially overlap.
        df = DataFrame(
            {
                "key1": get_test_data(),
                "key2": get_test_data(),
                "data1": np.random.randn(50),
                "data2": np.random.randn(50),
            }
        )

        # exclude a couple keys for fun
        df = df[df["key2"] > 1]
        return df
127
+
128
    @pytest.fixture
    def df2(self):
        # Smaller 10-row frame sharing key1/key2 with the df fixture; key2 is
        # drawn from only 4 groups.
        return DataFrame(
            {
                "key1": get_test_data(n=10),
                "key2": get_test_data(ngroups=4, n=10),
                "value": np.random.randn(10),
            }
        )
137
+
138
    @pytest.fixture
    def left(self):
        # Frame with a string key column containing duplicates ("a", "e").
        return DataFrame(
            {"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
        )
143
+
144
    @pytest.fixture
    def right(self):
        # Frame keyed by a unique string *index* overlapping left["key"].
        return DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
147
+
148
+ def test_merge_inner_join_empty(self):
149
+ # GH 15328
150
+ df_empty = DataFrame()
151
+ df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
152
+ result = merge(df_empty, df_a, left_index=True, right_index=True)
153
+ expected = DataFrame({"a": []}, dtype="int64")
154
+ tm.assert_frame_equal(result, expected)
155
+
156
+ def test_merge_common(self, df, df2):
157
+ joined = merge(df, df2)
158
+ exp = merge(df, df2, on=["key1", "key2"])
159
+ tm.assert_frame_equal(joined, exp)
160
+
161
    def test_merge_non_string_columns(self):
        # https://github.com/pandas-dev/pandas/issues/17962
        # Checks that method runs for non string column names
        left = DataFrame(
            {0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]}
        )

        # merging an int frame with its float copy should round-trip the values
        right = left.astype(float)
        expected = left
        result = merge(left, right)
        tm.assert_frame_equal(expected, result)
172
+
173
    def test_merge_index_as_on_arg(self, df, df2):
        # GH14355
        # on="key1" must work when key1 is the *index* of both frames, and
        # match a column-based merge followed by set_index.
        left = df.set_index("key1")
        right = df2.set_index("key1")
        result = merge(left, right, on="key1")
        expected = merge(df, df2, on="key1").set_index("key1")
        tm.assert_frame_equal(result, expected)
181
+
182
+ def test_merge_index_singlekey_right_vs_left(self):
183
+ left = DataFrame(
184
+ {"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
185
+ )
186
+ right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
187
+
188
+ merged1 = merge(
189
+ left, right, left_on="key", right_index=True, how="left", sort=False
190
+ )
191
+ merged2 = merge(
192
+ right, left, right_on="key", left_index=True, how="right", sort=False
193
+ )
194
+ tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
195
+
196
+ merged1 = merge(
197
+ left, right, left_on="key", right_index=True, how="left", sort=True
198
+ )
199
+ merged2 = merge(
200
+ right, left, right_on="key", left_index=True, how="right", sort=True
201
+ )
202
+ tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
203
+
204
    def test_merge_index_singlekey_inner(self):
        # Inner merge on (column, right-index) should agree with
        # DataFrame.join restricted to the merge result's rows, in both
        # argument orders.
        left = DataFrame(
            {"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
        )
        right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])

        # inner join
        result = merge(left, right, left_on="key", right_index=True, how="inner")
        expected = left.join(right, on="key").loc[result.index]
        tm.assert_frame_equal(result, expected)

        result = merge(right, left, right_on="key", left_index=True, how="inner")
        expected = left.join(right, on="key").loc[result.index]
        tm.assert_frame_equal(result, expected.loc[:, result.columns])
218
+
219
    def test_merge_misspecified(self, df, df2, left, right):
        # Inconsistent key specifications must raise MergeError/ValueError
        # with the documented messages.
        msg = "Must pass right_on or right_index=True"
        with pytest.raises(pd.errors.MergeError, match=msg):
            merge(left, right, left_index=True)
        msg = "Must pass left_on or left_index=True"
        with pytest.raises(pd.errors.MergeError, match=msg):
            merge(left, right, right_index=True)

        msg = (
            'Can only pass argument "on" OR "left_on" and "right_on", not '
            "a combination of both"
        )
        with pytest.raises(pd.errors.MergeError, match=msg):
            merge(left, left, left_on="key", on="key")

        # mismatched key-list lengths
        msg = r"len\(right_on\) must equal len\(left_on\)"
        with pytest.raises(ValueError, match=msg):
            merge(df, df2, left_on=["key1"], right_on=["key1", "key2"])
237
+
238
    def test_index_and_on_parameters_confusion(self, df, df2):
        # Passing key lists to left_index/right_index (which must be bool)
        # raises ValueError naming the offending parameter.
        msg = "right_index parameter must be of type bool, not <class 'list'>"
        with pytest.raises(ValueError, match=msg):
            merge(
                df,
                df2,
                how="left",
                left_index=False,
                right_index=["key1", "key2"],
            )
        msg = "left_index parameter must be of type bool, not <class 'list'>"
        with pytest.raises(ValueError, match=msg):
            merge(
                df,
                df2,
                how="left",
                left_index=["key1", "key2"],
                right_index=False,
            )
        # left_index is reported first when both are wrong
        with pytest.raises(ValueError, match=msg):
            merge(
                df,
                df2,
                how="left",
                left_index=["key1", "key2"],
                right_index=["key1", "key2"],
            )
265
+
266
+ def test_merge_overlap(self, left):
267
+ merged = merge(left, left, on="key")
268
+ exp_len = (left["key"].value_counts() ** 2).sum()
269
+ assert len(merged) == exp_len
270
+ assert "v1_x" in merged
271
+ assert "v1_y" in merged
272
+
273
    def test_merge_different_column_key_names(self):
        # Outer merge with differently named key columns keeps both key
        # columns (NaN where a key is absent on that side) and suffixes the
        # overlapping "value" columns.
        left = DataFrame({"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
        right = DataFrame({"rkey": ["foo", "bar", "qux", "foo"], "value": [5, 6, 7, 8]})

        merged = left.merge(
            right, left_on="lkey", right_on="rkey", how="outer", sort=True
        )

        exp = Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey")
        tm.assert_series_equal(merged["lkey"], exp)

        exp = Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey")
        tm.assert_series_equal(merged["rkey"], exp)

        exp = Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x")
        tm.assert_series_equal(merged["value_x"], exp)

        exp = Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y")
        tm.assert_series_equal(merged["value_y"], exp)
292
+
293
    def test_merge_copy(self):
        # With copy=True, mutating the merge result must not write back into
        # either input frame.
        left = DataFrame({"a": 0, "b": 1}, index=range(10))
        right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))

        merged = merge(left, right, left_index=True, right_index=True, copy=True)

        merged["a"] = 6
        assert (left["a"] == 0).all()

        merged["d"] = "peekaboo"
        assert (right["d"] == "bar").all()
304
+
305
    def test_merge_nocopy(self, using_array_manager):
        # copy=False should produce columns that share memory with the
        # inputs. (using_array_manager is an injected fixture; unused here —
        # presumably kept for parametrization. TODO confirm.)
        left = DataFrame({"a": 0, "b": 1}, index=range(10))
        right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))

        merged = merge(left, right, left_index=True, right_index=True, copy=False)

        assert np.shares_memory(merged["a"]._values, left["a"]._values)
        assert np.shares_memory(merged["d"]._values, right["d"]._values)
313
+
314
    def test_intelligently_handle_join_key(self):
        # #733, be a bit more 1337 about not returning unconsolidated DataFrame
        # Outer merge on a duplicated key: left "value" becomes float (NaN
        # fill) while the key and right "rvalue" stay intact.
        left = DataFrame(
            {"key": [1, 1, 2, 2, 3], "value": list(range(5))}, columns=["value", "key"]
        )
        right = DataFrame({"key": [1, 1, 2, 3, 4, 5], "rvalue": list(range(6))})

        joined = merge(left, right, on="key", how="outer")
        expected = DataFrame(
            {
                "key": [1, 1, 1, 1, 2, 2, 3, 4, 5],
                "value": np.array([0, 0, 1, 1, 2, 3, 4, np.nan, np.nan]),
                "rvalue": [0, 1, 0, 1, 2, 2, 3, 4, 5],
            },
            columns=["value", "key", "rvalue"],
        )
        tm.assert_frame_equal(joined, expected)
332
+
333
    def test_merge_join_key_dtype_cast(self):
        # #8596
        # Outer merges must preserve the key dtype when no NaNs are
        # introduced (int64, bool) and name array keys "key_0".

        df1 = DataFrame({"key": [1], "v1": [10]})
        df2 = DataFrame({"key": [2], "v1": [20]})
        df = merge(df1, df2, how="outer")
        assert df["key"].dtype == "int64"

        df1 = DataFrame({"key": [True], "v1": [1]})
        df2 = DataFrame({"key": [False], "v1": [0]})
        df = merge(df1, df2, how="outer")

        # GH13169
        # GH#40073
        assert df["key"].dtype == "bool"

        df1 = DataFrame({"val": [1]})
        df2 = DataFrame({"val": [2]})
        lkey = np.array([1])
        rkey = np.array([2])
        df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer")
        assert df["key_0"].dtype == np.int_
355
+
356
    def test_handle_join_key_pass_array(self):
        # Raw ndarrays may be passed as join keys; they become "key_0"
        # columns and combine symmetrically with column/index keys.
        left = DataFrame(
            {"key": [1, 1, 2, 2, 3], "value": np.arange(5)},
            columns=["value", "key"],
            dtype="int64",
        )
        right = DataFrame({"rvalue": np.arange(6)}, dtype="int64")
        key = np.array([1, 1, 2, 3, 4, 5], dtype="int64")

        merged = merge(left, right, left_on="key", right_on=key, how="outer")
        merged2 = merge(right, left, left_on=key, right_on="key", how="outer")

        tm.assert_series_equal(merged["key"], merged2["key"])
        assert merged["key"].notna().all()
        assert merged2["key"].notna().all()

        # both sides keyed by plain arrays
        left = DataFrame({"value": np.arange(5)}, columns=["value"])
        right = DataFrame({"rvalue": np.arange(6)})
        lkey = np.array([1, 1, 2, 2, 3])
        rkey = np.array([1, 1, 2, 3, 4, 5])

        merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer")
        expected = Series([1, 1, 1, 1, 2, 2, 3, 4, 5], dtype=np.int_, name="key_0")
        tm.assert_series_equal(merged["key_0"], expected)

        # index vs array key
        left = DataFrame({"value": np.arange(3)})
        right = DataFrame({"rvalue": np.arange(6)})

        key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
        merged = merge(left, right, left_index=True, right_on=key, how="outer")
        tm.assert_series_equal(merged["key_0"], Series(key, name="key_0"))
387
+
388
    def test_no_overlap_more_informative_error(self):
        # No common columns and no explicit keys: the error message should
        # echo the merge options the user passed.
        dt = datetime.now()
        df1 = DataFrame({"x": ["a"]}, index=[dt])

        df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])

        msg = (
            "No common columns to perform merge on. "
            f"Merge options: left_on={None}, right_on={None}, "
            f"left_index={False}, right_index={False}"
        )

        with pytest.raises(MergeError, match=msg):
            merge(df1, df2)
402
+
403
    def test_merge_non_unique_indexes(self):
        # Index-based merges with duplicated (and non-monotonic) datetime
        # indexes; correctness is checked by the module-level _check_merge.
        dt = datetime(2012, 5, 1)
        dt2 = datetime(2012, 5, 2)
        dt3 = datetime(2012, 5, 3)
        dt4 = datetime(2012, 5, 4)

        df1 = DataFrame({"x": ["a"]}, index=[dt])
        df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
        _check_merge(df1, df2)

        # Not monotonic
        df1 = DataFrame({"x": ["a", "b", "q"]}, index=[dt2, dt, dt4])
        df2 = DataFrame(
            {"y": ["c", "d", "e", "f", "g", "h"]}, index=[dt3, dt3, dt2, dt2, dt, dt]
        )
        _check_merge(df1, df2)

        df1 = DataFrame({"x": ["a", "b"]}, index=[dt, dt])
        df2 = DataFrame({"y": ["c", "d"]}, index=[dt, dt])
        _check_merge(df1, df2)
423
+
424
    def test_merge_non_unique_index_many_to_many(self):
        # Many-to-many index merge: duplicates on both sides.
        dt = datetime(2012, 5, 1)
        dt2 = datetime(2012, 5, 2)
        dt3 = datetime(2012, 5, 3)
        df1 = DataFrame({"x": ["a", "b", "c", "d"]}, index=[dt2, dt2, dt, dt])
        df2 = DataFrame(
            {"y": ["e", "f", "g", " h", "i"]}, index=[dt2, dt2, dt3, dt, dt]
        )
        _check_merge(df1, df2)
433
+
434
+ def test_left_merge_empty_dataframe(self):
435
+ left = DataFrame({"key": [1], "value": [2]})
436
+ right = DataFrame({"key": []})
437
+
438
+ result = merge(left, right, on="key", how="left")
439
+ tm.assert_frame_equal(result, left)
440
+
441
+ result = merge(right, left, on="key", how="right")
442
+ tm.assert_frame_equal(result, left)
443
+
444
    @pytest.mark.parametrize(
        "kwarg",
        [
            {"left_index": True, "right_index": True},
            {"left_index": True, "right_on": "x"},
            {"left_on": "a", "right_index": True},
            {"left_on": "a", "right_on": "x"},
        ],
    )
    def test_merge_left_empty_right_empty(self, join_type, kwarg):
        # GH 10824
        # Empty-on-empty merges yield an empty object-dtype frame with the
        # union of columns, for every join type and key specification.
        left = DataFrame(columns=["a", "b", "c"])
        right = DataFrame(columns=["x", "y", "z"])

        exp_in = DataFrame(columns=["a", "b", "c", "x", "y", "z"], dtype=object)

        result = merge(left, right, how=join_type, **kwarg)
        tm.assert_frame_equal(result, exp_in)
462
+
463
    def test_merge_left_empty_right_notempty(self):
        # GH 10824
        # Empty left frame: inner/left joins come back empty (check1), while
        # right/outer joins keep the right frame's rows with all-NaN left
        # columns (check2). The expected "a" column depends on the key spec.
        left = DataFrame(columns=["a", "b", "c"])
        right = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"])

        exp_out = DataFrame(
            {
                "a": np.array([np.nan] * 3, dtype=object),
                "b": np.array([np.nan] * 3, dtype=object),
                "c": np.array([np.nan] * 3, dtype=object),
                "x": [1, 4, 7],
                "y": [2, 5, 8],
                "z": [3, 6, 9],
            },
            columns=["a", "b", "c", "x", "y", "z"],
        )
        exp_in = exp_out[0:0]  # make empty DataFrame keeping dtype

        def check1(exp, kwarg):
            # inner/left: no rows survive
            result = merge(left, right, how="inner", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = merge(left, right, how="left", **kwarg)
            tm.assert_frame_equal(result, exp)

        def check2(exp, kwarg):
            # right/outer: right frame's rows survive
            result = merge(left, right, how="right", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = merge(left, right, how="outer", **kwarg)
            tm.assert_frame_equal(result, exp)

        for kwarg in [
            {"left_index": True, "right_index": True},
            {"left_index": True, "right_on": "x"},
        ]:
            check1(exp_in, kwarg)
            check2(exp_out, kwarg)

        # joining left column "a" to the right index fills "a" with the index
        kwarg = {"left_on": "a", "right_index": True}
        check1(exp_in, kwarg)
        exp_out["a"] = [0, 1, 2]
        check2(exp_out, kwarg)

        kwarg = {"left_on": "a", "right_on": "x"}
        check1(exp_in, kwarg)
        exp_out["a"] = np.array([np.nan] * 3, dtype=object)
        check2(exp_out, kwarg)
509
+
510
    def test_merge_left_notempty_right_empty(self):
        # GH 10824
        # Mirror of the previous test: empty *right* frame. inner/right joins
        # are empty; left/outer joins keep the left rows with all-NaN right
        # columns.
        left = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
        right = DataFrame(columns=["x", "y", "z"])

        exp_out = DataFrame(
            {
                "a": [1, 4, 7],
                "b": [2, 5, 8],
                "c": [3, 6, 9],
                "x": np.array([np.nan] * 3, dtype=object),
                "y": np.array([np.nan] * 3, dtype=object),
                "z": np.array([np.nan] * 3, dtype=object),
            },
            columns=["a", "b", "c", "x", "y", "z"],
        )
        exp_in = exp_out[0:0]  # make empty DataFrame keeping dtype
        # result will have object dtype
        exp_in.index = exp_in.index.astype(object)

        def check1(exp, kwarg):
            # inner/right: no rows survive
            result = merge(left, right, how="inner", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = merge(left, right, how="right", **kwarg)
            tm.assert_frame_equal(result, exp)

        def check2(exp, kwarg):
            # left/outer: left frame's rows survive
            result = merge(left, right, how="left", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = merge(left, right, how="outer", **kwarg)
            tm.assert_frame_equal(result, exp)

        # TODO: should the next loop be un-indented? doing so breaks this test
        for kwarg in [
            {"left_index": True, "right_index": True},
            {"left_index": True, "right_on": "x"},
            {"left_on": "a", "right_index": True},
            {"left_on": "a", "right_on": "x"},
        ]:
            check1(exp_in, kwarg)
            check2(exp_out, kwarg)
551
+
552
    def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
        # GH 25183
        # Merging an empty slice with the full frame must preserve every
        # column dtype, across all dtype combinations of key and value.
        df = DataFrame(
            {"key": series_of_dtype, "value": series_of_dtype2},
            columns=["key", "value"],
        )
        df_empty = df[:0]
        expected = DataFrame(
            {
                "value_x": Series(dtype=df.dtypes["value"]),
                "key": Series(dtype=df.dtypes["key"]),
                "value_y": Series(dtype=df.dtypes["value"]),
            },
            columns=["value_x", "key", "value_y"],
        )
        actual = df_empty.merge(df, on="key")
        tm.assert_frame_equal(actual, expected)
569
+
570
    def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na):
        # GH 25183
        # All-NA value columns must keep their dtype through the merge.
        df_left = DataFrame(
            {"key": series_of_dtype, "value": series_of_dtype_all_na},
            columns=["key", "value"],
        )
        df_right = DataFrame(
            {"key": series_of_dtype, "value": series_of_dtype_all_na},
            columns=["key", "value"],
        )
        expected = DataFrame(
            {
                "key": series_of_dtype,
                "value_x": series_of_dtype_all_na,
                "value_y": series_of_dtype_all_na,
            },
            columns=["key", "value_x", "value_y"],
        )
        actual = df_left.merge(df_right, on="key")
        tm.assert_frame_equal(actual, expected)
590
+
591
+ def test_merge_nosort(self):
592
+ # GH#2098
593
+
594
+ d = {
595
+ "var1": np.random.randint(0, 10, size=10),
596
+ "var2": np.random.randint(0, 10, size=10),
597
+ "var3": [
598
+ datetime(2012, 1, 12),
599
+ datetime(2011, 2, 4),
600
+ datetime(2010, 2, 3),
601
+ datetime(2012, 1, 12),
602
+ datetime(2011, 2, 4),
603
+ datetime(2012, 4, 3),
604
+ datetime(2012, 3, 4),
605
+ datetime(2008, 5, 1),
606
+ datetime(2010, 2, 3),
607
+ datetime(2012, 2, 3),
608
+ ],
609
+ }
610
+ df = DataFrame.from_dict(d)
611
+ var3 = df.var3.unique()
612
+ var3 = np.sort(var3)
613
+ new = DataFrame.from_dict({"var3": var3, "var8": np.random.random(7)})
614
+
615
+ result = df.merge(new, on="var3", sort=False)
616
+ exp = merge(df, new, on="var3", sort=False)
617
+ tm.assert_frame_equal(result, exp)
618
+
619
+ assert (df.var3.unique() == result.var3.unique()).all()
620
+
621
    @pytest.mark.parametrize(
        ("sort", "values"), [(False, [1, 1, 0, 1, 1]), (True, [0, 1, 1, 1, 1])]
    )
    @pytest.mark.parametrize("how", ["left", "right"])
    def test_merge_same_order_left_right(self, sort, values, how):
        # GH#35382
        # Self-merge: left and right joins give the same row order for a
        # given sort flag.
        df = DataFrame({"a": [1, 0, 1]})

        result = df.merge(df, on="a", how=how, sort=sort)
        expected = DataFrame(values, columns=["a"])
        tm.assert_frame_equal(result, expected)
632
+
633
    def test_merge_nan_right(self):
        # join on="i1" looks i1 up in df2's index; the unmatched row yields
        # NaN in the joined columns. Expected frame is built via a dummy
        # None-named column to reproduce the result's RangeIndex.
        df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
        df2 = DataFrame({"i1": [0], "i3": [0]})
        result = df1.join(df2, on="i1", rsuffix="_")
        expected = (
            DataFrame(
                {
                    "i1": {0: 0.0, 1: 1},
                    "i2": {0: 0, 1: 1},
                    "i1_": {0: 0, 1: np.nan},
                    "i3": {0: 0.0, 1: np.nan},
                    None: {0: 0, 1: 0},
                }
            )
            .set_index(None)
            .reset_index()[["i1", "i2", "i1_", "i3"]]
        )
        tm.assert_frame_equal(result, expected, check_dtype=False)
651
+
652
    def test_merge_nan_right2(self):
        # Same as test_merge_nan_right but with float columns; the unmatched
        # row becomes NaN in the joined i1_/i3 columns.
        df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
        df2 = DataFrame({"i1": [0], "i3": [0.7]})
        result = df1.join(df2, rsuffix="_", on="i1")
        expected = DataFrame(
            {
                "i1": {0: 0, 1: 1},
                "i1_": {0: 0.0, 1: np.nan},
                "i2": {0: 0.5, 1: 1.5},
                "i3": {0: 0.69999999999999996, 1: np.nan},
            }
        )[["i1", "i2", "i1_", "i3"]]
        tm.assert_frame_equal(result, expected)
665
+
666
    def test_merge_type(self, df, df2):
        # merge must honor _constructor and return the subclass type.
        class NotADataFrame(DataFrame):
            @property
            def _constructor(self):
                return NotADataFrame

        nad = NotADataFrame(df)
        result = nad.merge(df2, on="key1")

        assert isinstance(result, NotADataFrame)
676
+
677
    def test_join_append_timedeltas(self, using_array_manager):
        # timedelta64 issues with join/merge
        # GH 5695
        # Concatenating datetime/timedelta rows onto an initially empty
        # object frame must keep the temporal dtypes (BlockManager) or fall
        # back to object (ArrayManager).
        d = DataFrame.from_dict(
            {"d": [datetime(2013, 11, 5, 5, 56)], "t": [timedelta(0, 22500)]}
        )
        df = DataFrame(columns=list("dt"))
        df = concat([df, d], ignore_index=True)
        result = concat([df, d], ignore_index=True)
        expected = DataFrame(
            {
                "d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)],
                "t": [timedelta(0, 22500), timedelta(0, 22500)],
            }
        )
        if using_array_manager:
            # TODO(ArrayManager) decide on exact casting rules in concat
            expected = expected.astype(object)
        tm.assert_frame_equal(result, expected)
697
+
698
    def test_join_append_timedeltas2(self):
        # timedelta64 issues with join/merge
        # GH 5695
        # Left join with a missing right row: the timedelta column gets NaT,
        # not NaN, for the unmatched index.
        td = np.timedelta64(300000000)
        lhs = DataFrame(Series([td, td], index=["A", "B"]))
        rhs = DataFrame(Series([td], index=["A"]))

        result = lhs.join(rhs, rsuffix="r", how="left")
        expected = DataFrame(
            {
                "0": Series([td, td], index=list("AB")),
                "0r": Series([td, pd.NaT], index=list("AB")),
            }
        )
        tm.assert_frame_equal(result, expected)
713
+
714
    @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
    def test_other_datetime_unit(self, unit):
        # GH 13389
        # Merging against an all-NaT datetime column of a given unit must
        # preserve the unit (sub-second units) or the nearest supported one
        # ("s" for D/h/m).
        df1 = DataFrame({"entity_id": [101, 102]})
        ser = Series([None, None], index=[101, 102], name="days")

        dtype = f"datetime64[{unit}]"

        if unit in ["D", "h", "m"]:
            # not supported so we cast to the nearest supported unit, seconds
            exp_dtype = "datetime64[s]"
        else:
            exp_dtype = dtype
        df2 = ser.astype(exp_dtype).to_frame("days")
        assert df2["days"].dtype == exp_dtype

        result = df1.merge(df2, left_on="entity_id", right_index=True)

        days = np.array(["nat", "nat"], dtype=exp_dtype)
        days = pd.core.arrays.DatetimeArray._simple_new(days, dtype=days.dtype)
        exp = DataFrame(
            {
                "entity_id": [101, 102],
                "days": days,
            },
            columns=["entity_id", "days"],
        )
        assert exp["days"].dtype == exp_dtype
        tm.assert_frame_equal(result, exp)
743
+
744
    @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
    def test_other_timedelta_unit(self, unit):
        # GH 13389
        # Timedelta analogue of test_other_datetime_unit: casting to D/h/m
        # raises, so those go through "s"; merge preserves the unit.
        df1 = DataFrame({"entity_id": [101, 102]})
        ser = Series([None, None], index=[101, 102], name="days")

        dtype = f"m8[{unit}]"
        if unit in ["D", "h", "m"]:
            # We cannot astype, instead do nearest supported unit, i.e. "s"
            msg = "Supported resolutions are 's', 'ms', 'us', 'ns'"
            with pytest.raises(ValueError, match=msg):
                ser.astype(dtype)

            df2 = ser.astype("m8[s]").to_frame("days")
        else:
            df2 = ser.astype(dtype).to_frame("days")
            assert df2["days"].dtype == dtype

        result = df1.merge(df2, left_on="entity_id", right_index=True)

        exp = DataFrame(
            {"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)},
            columns=["entity_id", "days"],
        )
        tm.assert_frame_equal(result, exp)
769
+
770
    def test_overlapping_columns_error_message(self):
        # Duplicate non-key columns are allowed when both sides duplicate
        # them; but a duplicated *merge key candidate* raises MergeError.
        df = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
        df2 = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})

        df.columns = ["key", "foo", "foo"]
        df2.columns = ["key", "bar", "bar"]
        expected = DataFrame(
            {
                "key": [1, 2, 3],
                "v1": [4, 5, 6],
                "v2": [7, 8, 9],
                "v3": [4, 5, 6],
                "v4": [7, 8, 9],
            }
        )
        expected.columns = ["key", "foo", "foo", "bar", "bar"]
        tm.assert_frame_equal(merge(df, df2), expected)

        # #2649, #10639
        df2.columns = ["key1", "foo", "foo"]
        msg = r"Data columns not unique: Index\(\['foo'\], dtype='object'\)"
        with pytest.raises(MergeError, match=msg):
            merge(df, df2)
793
+
794
    def test_merge_on_datetime64tz(self):
        # GH11405
        # Outer merge on a tz-aware datetime *key* column.
        left = DataFrame(
            {
                "key": pd.date_range("20151010", periods=2, tz="US/Eastern"),
                "value": [1, 2],
            }
        )
        right = DataFrame(
            {
                "key": pd.date_range("20151011", periods=3, tz="US/Eastern"),
                "value": [1, 2, 3],
            }
        )

        expected = DataFrame(
            {
                "key": pd.date_range("20151010", periods=4, tz="US/Eastern"),
                "value_x": [1, 2, np.nan, np.nan],
                "value_y": [np.nan, 1, 2, 3],
            }
        )
        result = merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
818
+
819
    def test_merge_datetime64tz_values(self):
        # Outer merge where the tz-aware datetimes are *values*: unmatched
        # rows get NaT and the tz-aware dtype is preserved.
        left = DataFrame(
            {
                "key": [1, 2],
                "value": pd.date_range("20151010", periods=2, tz="US/Eastern"),
            }
        )
        right = DataFrame(
            {
                "key": [2, 3],
                "value": pd.date_range("20151011", periods=2, tz="US/Eastern"),
            }
        )
        expected = DataFrame(
            {
                "key": [1, 2, 3],
                "value_x": list(pd.date_range("20151010", periods=2, tz="US/Eastern"))
                + [pd.NaT],
                "value_y": [pd.NaT]
                + list(pd.date_range("20151011", periods=2, tz="US/Eastern")),
            }
        )
        result = merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
        assert result["value_x"].dtype == "datetime64[ns, US/Eastern]"
        assert result["value_y"].dtype == "datetime64[ns, US/Eastern]"
845
+
846
    def test_merge_on_datetime64tz_empty(self):
        # https://github.com/pandas-dev/pandas/issues/25014
        # Merging an empty slice on a tz-aware key keeps tz-aware dtypes in
        # the empty result.
        dtz = pd.DatetimeTZDtype(tz="UTC")
        right = DataFrame(
            {
                "date": [pd.Timestamp("2018", tz=dtz.tz)],
                "value": [4.0],
                "date2": [pd.Timestamp("2019", tz=dtz.tz)],
            },
            columns=["date", "value", "date2"],
        )
        left = right[:0]
        result = left.merge(right, on="date")
        expected = DataFrame(
            {
                "value_x": Series(dtype=float),
                "date2_x": Series(dtype=dtz),
                "date": Series(dtype=dtz),
                "value_y": Series(dtype=float),
                "date2_y": Series(dtype=dtz),
            },
            columns=["value_x", "date2_x", "date", "value_y", "date2_y"],
        )
        tm.assert_frame_equal(result, expected)
870
+
871
    def test_merge_datetime64tz_with_dst_transition(self):
        # GH 18885
        # Outer merge on tz-aware keys spanning the Europe/Madrid DST
        # fall-back transition must align the repeated wall-clock hours.
        df1 = DataFrame(
            pd.date_range("2017-10-29 01:00", periods=4, freq="H", tz="Europe/Madrid"),
            columns=["date"],
        )
        df1["value"] = 1
        df2 = DataFrame(
            {
                "date": pd.to_datetime(
                    [
                        "2017-10-29 03:00:00",
                        "2017-10-29 04:00:00",
                        "2017-10-29 05:00:00",
                    ]
                ),
                "value": 2,
            }
        )
        df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid")
        result = merge(df1, df2, how="outer", on="date")
        expected = DataFrame(
            {
                "date": pd.date_range(
                    "2017-10-29 01:00", periods=7, freq="H", tz="Europe/Madrid"
                ),
                "value_x": [1] * 4 + [np.nan] * 3,
                "value_y": [np.nan] * 4 + [2] * 3,
            }
        )
        tm.assert_frame_equal(result, expected)
902
+
903
    def test_merge_non_unique_period_index(self):
        # GH #16871
        # Inner self-merge on a duplicated PeriodIndex: each value pairs
        # with both of its duplicates.
        index = pd.period_range("2016-01-01", periods=16, freq="M")
        df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
        df2 = concat([df, df])
        result = df.merge(df2, left_index=True, right_index=True, how="inner")
        expected = DataFrame(
            np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
            columns=["pnum_x", "pnum_y"],
            index=df2.sort_index().index,
        )
        tm.assert_frame_equal(result, expected)
915
+
916
    def test_merge_on_periods(self):
        # Outer merge on a Period key column.
        left = DataFrame(
            {"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]}
        )
        right = DataFrame(
            {
                "key": pd.period_range("20151011", periods=3, freq="D"),
                "value": [1, 2, 3],
            }
        )

        expected = DataFrame(
            {
                "key": pd.period_range("20151010", periods=4, freq="D"),
                "value_x": [1, 2, np.nan, np.nan],
                "value_y": [np.nan, 1, 2, 3],
            }
        )
        result = merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
936
+
937
    def test_merge_period_values(self):
        # Outer merge where Periods are *values*: unmatched rows get NaT and
        # the Period[D] dtype is preserved.
        left = DataFrame(
            {"key": [1, 2], "value": pd.period_range("20151010", periods=2, freq="D")}
        )
        right = DataFrame(
            {"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")}
        )

        exp_x = pd.period_range("20151010", periods=2, freq="D")
        exp_y = pd.period_range("20151011", periods=2, freq="D")
        expected = DataFrame(
            {
                "key": [1, 2, 3],
                "value_x": list(exp_x) + [pd.NaT],
                "value_y": [pd.NaT] + list(exp_y),
            }
        )
        result = merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
        assert result["value_x"].dtype == "Period[D]"
        assert result["value_y"].dtype == "Period[D]"
958
+
959
    def test_indicator(self, dfs_for_indicator):
        # PR #10054. xref #7412 and closes #8790.
        # indicator=True appends a "_merge" Categorical flagging each row's
        # origin; a string value renames that column. Inputs are untouched.
        df1, df2 = dfs_for_indicator
        df1_copy = df1.copy()

        df2_copy = df2.copy()

        df_result = DataFrame(
            {
                "col1": [0, 1, 2, 3, 4, 5],
                "col_conflict_x": [1, 2, np.nan, np.nan, np.nan, np.nan],
                "col_left": ["a", "b", np.nan, np.nan, np.nan, np.nan],
                "col_conflict_y": [np.nan, 1, 2, 3, 4, 5],
                "col_right": [np.nan, 2, 2, 2, 2, 2],
            }
        )
        df_result["_merge"] = Categorical(
            [
                "left_only",
                "both",
                "right_only",
                "right_only",
                "right_only",
                "right_only",
            ],
            categories=["left_only", "right_only", "both"],
        )

        df_result = df_result[
            [
                "col1",
                "col_conflict_x",
                "col_left",
                "col_conflict_y",
                "col_right",
                "_merge",
            ]
        ]

        test = merge(df1, df2, on="col1", how="outer", indicator=True)
        tm.assert_frame_equal(test, df_result)
        test = df1.merge(df2, on="col1", how="outer", indicator=True)
        tm.assert_frame_equal(test, df_result)

        # No side effects
        tm.assert_frame_equal(df1, df1_copy)
        tm.assert_frame_equal(df2, df2_copy)

        # Check with custom name
        df_result_custom_name = df_result
        df_result_custom_name = df_result_custom_name.rename(
            columns={"_merge": "custom_name"}
        )

        test_custom_name = merge(
            df1, df2, on="col1", how="outer", indicator="custom_name"
        )
        tm.assert_frame_equal(test_custom_name, df_result_custom_name)
        test_custom_name = df1.merge(
            df2, on="col1", how="outer", indicator="custom_name"
        )
        tm.assert_frame_equal(test_custom_name, df_result_custom_name)
1021
+
1022
    def test_merge_indicator_arg_validation(self, dfs_for_indicator):
        # Check only accepts strings and booleans
        df1, df2 = dfs_for_indicator

        msg = "indicator option can only accept boolean or string arguments"
        with pytest.raises(ValueError, match=msg):
            merge(df1, df2, on="col1", how="outer", indicator=5)
        with pytest.raises(ValueError, match=msg):
            df1.merge(df2, on="col1", how="outer", indicator=5)
1031
+
1032
+ def test_merge_indicator_result_integrity(self, dfs_for_indicator):
1033
+ # Check result integrity
1034
+ df1, df2 = dfs_for_indicator
1035
+
1036
+ test2 = merge(df1, df2, on="col1", how="left", indicator=True)
1037
+ assert (test2._merge != "right_only").all()
1038
+ test2 = df1.merge(df2, on="col1", how="left", indicator=True)
1039
+ assert (test2._merge != "right_only").all()
1040
+
1041
+ test3 = merge(df1, df2, on="col1", how="right", indicator=True)
1042
+ assert (test3._merge != "left_only").all()
1043
+ test3 = df1.merge(df2, on="col1", how="right", indicator=True)
1044
+ assert (test3._merge != "left_only").all()
1045
+
1046
+ test4 = merge(df1, df2, on="col1", how="inner", indicator=True)
1047
+ assert (test4._merge == "both").all()
1048
+ test4 = df1.merge(df2, on="col1", how="inner", indicator=True)
1049
+ assert (test4._merge == "both").all()
1050
+
1051
+ def test_merge_indicator_invalid(self, dfs_for_indicator):
1052
+ # Check if working name in df
1053
+ df1, _ = dfs_for_indicator
1054
+
1055
+ for i in ["_right_indicator", "_left_indicator", "_merge"]:
1056
+ df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})
1057
+
1058
+ msg = (
1059
+ "Cannot use `indicator=True` option when data contains a "
1060
+ f"column named {i}|"
1061
+ "Cannot use name of an existing column for indicator column"
1062
+ )
1063
+ with pytest.raises(ValueError, match=msg):
1064
+ merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
1065
+ with pytest.raises(ValueError, match=msg):
1066
+ df1.merge(df_badcolumn, on="col1", how="outer", indicator=True)
1067
+
1068
+ # Check for name conflict with custom name
1069
+ df_badcolumn = DataFrame({"col1": [1, 2], "custom_column_name": [2, 2]})
1070
+
1071
+ msg = "Cannot use name of an existing column for indicator column"
1072
+ with pytest.raises(ValueError, match=msg):
1073
+ merge(
1074
+ df1,
1075
+ df_badcolumn,
1076
+ on="col1",
1077
+ how="outer",
1078
+ indicator="custom_column_name",
1079
+ )
1080
+ with pytest.raises(ValueError, match=msg):
1081
+ df1.merge(
1082
+ df_badcolumn, on="col1", how="outer", indicator="custom_column_name"
1083
+ )
1084
+
1085
+ def test_merge_indicator_multiple_columns(self):
1086
+ # Merge on multiple columns
1087
+ df3 = DataFrame({"col1": [0, 1], "col2": ["a", "b"]})
1088
+
1089
+ df4 = DataFrame({"col1": [1, 1, 3], "col2": ["b", "x", "y"]})
1090
+
1091
+ hand_coded_result = DataFrame(
1092
+ {"col1": [0, 1, 1, 3], "col2": ["a", "b", "x", "y"]}
1093
+ )
1094
+ hand_coded_result["_merge"] = Categorical(
1095
+ ["left_only", "both", "right_only", "right_only"],
1096
+ categories=["left_only", "right_only", "both"],
1097
+ )
1098
+
1099
+ test5 = merge(df3, df4, on=["col1", "col2"], how="outer", indicator=True)
1100
+ tm.assert_frame_equal(test5, hand_coded_result)
1101
+ test5 = df3.merge(df4, on=["col1", "col2"], how="outer", indicator=True)
1102
+ tm.assert_frame_equal(test5, hand_coded_result)
1103
+
1104
+ def test_validation(self):
1105
+ left = DataFrame(
1106
+ {"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]},
1107
+ index=range(4),
1108
+ )
1109
+
1110
+ right = DataFrame(
1111
+ {
1112
+ "a": ["a", "b", "c", "d", "e"],
1113
+ "c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"],
1114
+ },
1115
+ index=range(5),
1116
+ )
1117
+
1118
+ # Make sure no side effects.
1119
+ left_copy = left.copy()
1120
+ right_copy = right.copy()
1121
+
1122
+ result = merge(left, right, left_index=True, right_index=True, validate="1:1")
1123
+ tm.assert_frame_equal(left, left_copy)
1124
+ tm.assert_frame_equal(right, right_copy)
1125
+
1126
+ # make sure merge still correct
1127
+ expected = DataFrame(
1128
+ {
1129
+ "a_x": ["a", "b", "c", "d"],
1130
+ "b": ["cat", "dog", "weasel", "horse"],
1131
+ "a_y": ["a", "b", "c", "d"],
1132
+ "c": ["meow", "bark", "um... weasel noise?", "nay"],
1133
+ },
1134
+ index=range(4),
1135
+ columns=["a_x", "b", "a_y", "c"],
1136
+ )
1137
+
1138
+ result = merge(
1139
+ left, right, left_index=True, right_index=True, validate="one_to_one"
1140
+ )
1141
+ tm.assert_frame_equal(result, expected)
1142
+
1143
+ expected_2 = DataFrame(
1144
+ {
1145
+ "a": ["a", "b", "c", "d"],
1146
+ "b": ["cat", "dog", "weasel", "horse"],
1147
+ "c": ["meow", "bark", "um... weasel noise?", "nay"],
1148
+ },
1149
+ index=range(4),
1150
+ )
1151
+
1152
+ result = merge(left, right, on="a", validate="1:1")
1153
+ tm.assert_frame_equal(left, left_copy)
1154
+ tm.assert_frame_equal(right, right_copy)
1155
+ tm.assert_frame_equal(result, expected_2)
1156
+
1157
+ result = merge(left, right, on="a", validate="one_to_one")
1158
+ tm.assert_frame_equal(result, expected_2)
1159
+
1160
+ # One index, one column
1161
+ expected_3 = DataFrame(
1162
+ {
1163
+ "b": ["cat", "dog", "weasel", "horse"],
1164
+ "a": ["a", "b", "c", "d"],
1165
+ "c": ["meow", "bark", "um... weasel noise?", "nay"],
1166
+ },
1167
+ columns=["b", "a", "c"],
1168
+ index=range(4),
1169
+ )
1170
+
1171
+ left_index_reset = left.set_index("a")
1172
+ result = merge(
1173
+ left_index_reset,
1174
+ right,
1175
+ left_index=True,
1176
+ right_on="a",
1177
+ validate="one_to_one",
1178
+ )
1179
+ tm.assert_frame_equal(result, expected_3)
1180
+
1181
+ # Dups on right
1182
+ right_w_dups = concat([right, DataFrame({"a": ["e"], "c": ["moo"]}, index=[4])])
1183
+ merge(
1184
+ left,
1185
+ right_w_dups,
1186
+ left_index=True,
1187
+ right_index=True,
1188
+ validate="one_to_many",
1189
+ )
1190
+
1191
+ msg = "Merge keys are not unique in right dataset; not a one-to-one merge"
1192
+ with pytest.raises(MergeError, match=msg):
1193
+ merge(
1194
+ left,
1195
+ right_w_dups,
1196
+ left_index=True,
1197
+ right_index=True,
1198
+ validate="one_to_one",
1199
+ )
1200
+
1201
+ with pytest.raises(MergeError, match=msg):
1202
+ merge(left, right_w_dups, on="a", validate="one_to_one")
1203
+
1204
+ # Dups on left
1205
+ left_w_dups = concat(
1206
+ [left, DataFrame({"a": ["a"], "c": ["cow"]}, index=[3])], sort=True
1207
+ )
1208
+ merge(
1209
+ left_w_dups,
1210
+ right,
1211
+ left_index=True,
1212
+ right_index=True,
1213
+ validate="many_to_one",
1214
+ )
1215
+
1216
+ msg = "Merge keys are not unique in left dataset; not a one-to-one merge"
1217
+ with pytest.raises(MergeError, match=msg):
1218
+ merge(
1219
+ left_w_dups,
1220
+ right,
1221
+ left_index=True,
1222
+ right_index=True,
1223
+ validate="one_to_one",
1224
+ )
1225
+
1226
+ with pytest.raises(MergeError, match=msg):
1227
+ merge(left_w_dups, right, on="a", validate="one_to_one")
1228
+
1229
+ # Dups on both
1230
+ merge(left_w_dups, right_w_dups, on="a", validate="many_to_many")
1231
+
1232
+ msg = "Merge keys are not unique in right dataset; not a many-to-one merge"
1233
+ with pytest.raises(MergeError, match=msg):
1234
+ merge(
1235
+ left_w_dups,
1236
+ right_w_dups,
1237
+ left_index=True,
1238
+ right_index=True,
1239
+ validate="many_to_one",
1240
+ )
1241
+
1242
+ msg = "Merge keys are not unique in left dataset; not a one-to-many merge"
1243
+ with pytest.raises(MergeError, match=msg):
1244
+ merge(left_w_dups, right_w_dups, on="a", validate="one_to_many")
1245
+
1246
+ # Check invalid arguments
1247
+ msg = (
1248
+ '"jibberish" is not a valid argument. '
1249
+ "Valid arguments are:\n"
1250
+ '- "1:1"\n'
1251
+ '- "1:m"\n'
1252
+ '- "m:1"\n'
1253
+ '- "m:m"\n'
1254
+ '- "one_to_one"\n'
1255
+ '- "one_to_many"\n'
1256
+ '- "many_to_one"\n'
1257
+ '- "many_to_many"'
1258
+ )
1259
+ with pytest.raises(ValueError, match=msg):
1260
+ merge(left, right, on="a", validate="jibberish")
1261
+
1262
+ # Two column merge, dups in both, but jointly no dups.
1263
+ left = DataFrame(
1264
+ {
1265
+ "a": ["a", "a", "b", "b"],
1266
+ "b": [0, 1, 0, 1],
1267
+ "c": ["cat", "dog", "weasel", "horse"],
1268
+ },
1269
+ index=range(4),
1270
+ )
1271
+
1272
+ right = DataFrame(
1273
+ {
1274
+ "a": ["a", "a", "b"],
1275
+ "b": [0, 1, 0],
1276
+ "d": ["meow", "bark", "um... weasel noise?"],
1277
+ },
1278
+ index=range(3),
1279
+ )
1280
+
1281
+ expected_multi = DataFrame(
1282
+ {
1283
+ "a": ["a", "a", "b"],
1284
+ "b": [0, 1, 0],
1285
+ "c": ["cat", "dog", "weasel"],
1286
+ "d": ["meow", "bark", "um... weasel noise?"],
1287
+ },
1288
+ index=range(3),
1289
+ )
1290
+
1291
+ msg = (
1292
+ "Merge keys are not unique in either left or right dataset; "
1293
+ "not a one-to-one merge"
1294
+ )
1295
+ with pytest.raises(MergeError, match=msg):
1296
+ merge(left, right, on="a", validate="1:1")
1297
+
1298
+ result = merge(left, right, on=["a", "b"], validate="1:1")
1299
+ tm.assert_frame_equal(result, expected_multi)
1300
+
1301
+ def test_merge_two_empty_df_no_division_error(self):
1302
+ # GH17776, PR #17846
1303
+ a = DataFrame({"a": [], "b": [], "c": []})
1304
+ with np.errstate(divide="raise"):
1305
+ merge(a, a, on=("a", "b"))
1306
+
1307
+ @pytest.mark.parametrize("how", ["right", "outer"])
1308
+ @pytest.mark.parametrize(
1309
+ "index,expected_index",
1310
+ [
1311
+ (
1312
+ CategoricalIndex([1, 2, 4]),
1313
+ CategoricalIndex([1, 2, 4, None, None, None]),
1314
+ ),
1315
+ (
1316
+ DatetimeIndex(["2001-01-01", "2002-02-02", "2003-03-03"]),
1317
+ DatetimeIndex(
1318
+ ["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT]
1319
+ ),
1320
+ ),
1321
+ *[
1322
+ (
1323
+ Index([1, 2, 3], dtype=dtyp),
1324
+ Index([1, 2, 3, None, None, None], dtype=np.float64),
1325
+ )
1326
+ for dtyp in tm.ALL_REAL_NUMPY_DTYPES
1327
+ ],
1328
+ (
1329
+ IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]),
1330
+ IntervalIndex.from_tuples(
1331
+ [(1, 2), (2, 3), (3, 4), np.nan, np.nan, np.nan]
1332
+ ),
1333
+ ),
1334
+ (
1335
+ PeriodIndex(["2001-01-01", "2001-01-02", "2001-01-03"], freq="D"),
1336
+ PeriodIndex(
1337
+ ["2001-01-01", "2001-01-02", "2001-01-03", pd.NaT, pd.NaT, pd.NaT],
1338
+ freq="D",
1339
+ ),
1340
+ ),
1341
+ (
1342
+ TimedeltaIndex(["1d", "2d", "3d"]),
1343
+ TimedeltaIndex(["1d", "2d", "3d", pd.NaT, pd.NaT, pd.NaT]),
1344
+ ),
1345
+ ],
1346
+ )
1347
+ def test_merge_on_index_with_more_values(self, how, index, expected_index):
1348
+ # GH 24212
1349
+ # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that
1350
+ # -1 is interpreted as a missing value instead of the last element
1351
+ df1 = DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index)
1352
+ df2 = DataFrame({"b": [0, 1, 2, 3, 4, 5]})
1353
+ result = df1.merge(df2, left_on="key", right_index=True, how=how)
1354
+ expected = DataFrame(
1355
+ [
1356
+ [0, 0, 0],
1357
+ [1, 1, 1],
1358
+ [2, 2, 2],
1359
+ [np.nan, 3, 3],
1360
+ [np.nan, 4, 4],
1361
+ [np.nan, 5, 5],
1362
+ ],
1363
+ columns=["a", "key", "b"],
1364
+ )
1365
+ expected.set_index(expected_index, inplace=True)
1366
+ tm.assert_frame_equal(result, expected)
1367
+
1368
+ def test_merge_right_index_right(self):
1369
+ # Note: the expected output here is probably incorrect.
1370
+ # See https://github.com/pandas-dev/pandas/issues/17257 for more.
1371
+ # We include this as a regression test for GH-24897.
1372
+ left = DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]})
1373
+ right = DataFrame({"b": [1, 2, 3]})
1374
+
1375
+ expected = DataFrame(
1376
+ {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]},
1377
+ columns=["a", "key", "b"],
1378
+ index=[0, 1, 2, np.nan],
1379
+ )
1380
+ result = left.merge(right, left_on="key", right_index=True, how="right")
1381
+ tm.assert_frame_equal(result, expected)
1382
+
1383
+ @pytest.mark.parametrize("how", ["left", "right"])
1384
+ def test_merge_preserves_row_order(self, how):
1385
+ # GH 27453
1386
+ left_df = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
1387
+ right_df = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]})
1388
+ result = left_df.merge(right_df, on=["animal", "max_speed"], how=how)
1389
+ if how == "right":
1390
+ expected = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]})
1391
+ else:
1392
+ expected = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
1393
+ tm.assert_frame_equal(result, expected)
1394
+
1395
+ def test_merge_take_missing_values_from_index_of_other_dtype(self):
1396
+ # GH 24212
1397
+ left = DataFrame(
1398
+ {
1399
+ "a": [1, 2, 3],
1400
+ "key": Categorical(["a", "a", "b"], categories=list("abc")),
1401
+ }
1402
+ )
1403
+ right = DataFrame({"b": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"]))
1404
+ result = left.merge(right, left_on="key", right_index=True, how="right")
1405
+ expected = DataFrame(
1406
+ {
1407
+ "a": [1, 2, 3, None],
1408
+ "key": Categorical(["a", "a", "b", "c"]),
1409
+ "b": [1, 1, 2, 3],
1410
+ },
1411
+ index=[0, 1, 2, np.nan],
1412
+ )
1413
+ expected = expected.reindex(columns=["a", "key", "b"])
1414
+ tm.assert_frame_equal(result, expected)
1415
+
1416
+ def test_merge_readonly(self):
1417
+ # https://github.com/pandas-dev/pandas/issues/27943
1418
+ data1 = DataFrame(
1419
+ np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"]
1420
+ )
1421
+ data2 = DataFrame(
1422
+ np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"]
1423
+ )
1424
+
1425
+ # make each underlying block array / column array read-only
1426
+ for arr in data1._mgr.arrays:
1427
+ arr.flags.writeable = False
1428
+
1429
+ data1.merge(data2) # no error
1430
+
1431
+
1432
+ def _check_merge(x, y):
1433
+ for how in ["inner", "left", "outer"]:
1434
+ result = x.join(y, how=how)
1435
+
1436
+ expected = merge(x.reset_index(), y.reset_index(), how=how, sort=True)
1437
+ expected = expected.set_index("index")
1438
+
1439
+ # TODO check_names on merge?
1440
+ tm.assert_frame_equal(result, expected, check_names=False)
1441
+
1442
+
1443
+ class TestMergeDtypes:
1444
+ @pytest.mark.parametrize(
1445
+ "right_vals", [["foo", "bar"], Series(["foo", "bar"]).astype("category")]
1446
+ )
1447
+ def test_different(self, right_vals):
1448
+ left = DataFrame(
1449
+ {
1450
+ "A": ["foo", "bar"],
1451
+ "B": Series(["foo", "bar"]).astype("category"),
1452
+ "C": [1, 2],
1453
+ "D": [1.0, 2.0],
1454
+ "E": Series([1, 2], dtype="uint64"),
1455
+ "F": Series([1, 2], dtype="int32"),
1456
+ }
1457
+ )
1458
+ right = DataFrame({"A": right_vals})
1459
+
1460
+ # GH 9780
1461
+ # We allow merging on object and categorical cols and cast
1462
+ # categorical cols to object
1463
+ result = merge(left, right, on="A")
1464
+ assert is_object_dtype(result.A.dtype)
1465
+
1466
+ @pytest.mark.parametrize(
1467
+ "d1", [np.int64, np.int32, np.intc, np.int16, np.int8, np.uint8]
1468
+ )
1469
+ @pytest.mark.parametrize("d2", [np.int64, np.float64, np.float32, np.float16])
1470
+ def test_join_multi_dtypes(self, d1, d2):
1471
+ dtype1 = np.dtype(d1)
1472
+ dtype2 = np.dtype(d2)
1473
+
1474
+ left = DataFrame(
1475
+ {
1476
+ "k1": np.array([0, 1, 2] * 8, dtype=dtype1),
1477
+ "k2": ["foo", "bar"] * 12,
1478
+ "v": np.array(np.arange(24), dtype=np.int64),
1479
+ }
1480
+ )
1481
+
1482
+ index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
1483
+ right = DataFrame({"v2": np.array([5, 7], dtype=dtype2)}, index=index)
1484
+
1485
+ result = left.join(right, on=["k1", "k2"])
1486
+
1487
+ expected = left.copy()
1488
+
1489
+ if dtype2.kind == "i":
1490
+ dtype2 = np.dtype("float64")
1491
+ expected["v2"] = np.array(np.nan, dtype=dtype2)
1492
+ expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
1493
+ expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
1494
+
1495
+ tm.assert_frame_equal(result, expected)
1496
+
1497
+ result = left.join(right, on=["k1", "k2"], sort=True)
1498
+ expected.sort_values(["k1", "k2"], kind="mergesort", inplace=True)
1499
+ tm.assert_frame_equal(result, expected)
1500
+
1501
+ @pytest.mark.parametrize(
1502
+ "int_vals, float_vals, exp_vals",
1503
+ [
1504
+ ([1, 2, 3], [1.0, 2.0, 3.0], {"X": [1, 2, 3], "Y": [1.0, 2.0, 3.0]}),
1505
+ ([1, 2, 3], [1.0, 3.0], {"X": [1, 3], "Y": [1.0, 3.0]}),
1506
+ ([1, 2], [1.0, 2.0, 3.0], {"X": [1, 2], "Y": [1.0, 2.0]}),
1507
+ ],
1508
+ )
1509
+ def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals):
1510
+ # GH 16572
1511
+ # Check that float column is not cast to object if
1512
+ # merging on float and int columns
1513
+ A = DataFrame({"X": int_vals})
1514
+ B = DataFrame({"Y": float_vals})
1515
+ expected = DataFrame(exp_vals)
1516
+
1517
+ result = A.merge(B, left_on="X", right_on="Y")
1518
+ tm.assert_frame_equal(result, expected)
1519
+
1520
+ result = B.merge(A, left_on="Y", right_on="X")
1521
+ tm.assert_frame_equal(result, expected[["Y", "X"]])
1522
+
1523
+ def test_merge_key_dtype_cast(self):
1524
+ # GH 17044
1525
+ df1 = DataFrame({"key": [1.0, 2.0], "v1": [10, 20]}, columns=["key", "v1"])
1526
+ df2 = DataFrame({"key": [2], "v2": [200]}, columns=["key", "v2"])
1527
+ result = df1.merge(df2, on="key", how="left")
1528
+ expected = DataFrame(
1529
+ {"key": [1.0, 2.0], "v1": [10, 20], "v2": [np.nan, 200.0]},
1530
+ columns=["key", "v1", "v2"],
1531
+ )
1532
+ tm.assert_frame_equal(result, expected)
1533
+
1534
+ def test_merge_on_ints_floats_warning(self):
1535
+ # GH 16572
1536
+ # merge will produce a warning when merging on int and
1537
+ # float columns where the float values are not exactly
1538
+ # equal to their int representation
1539
+ A = DataFrame({"X": [1, 2, 3]})
1540
+ B = DataFrame({"Y": [1.1, 2.5, 3.0]})
1541
+ expected = DataFrame({"X": [3], "Y": [3.0]})
1542
+
1543
+ with tm.assert_produces_warning(UserWarning):
1544
+ result = A.merge(B, left_on="X", right_on="Y")
1545
+ tm.assert_frame_equal(result, expected)
1546
+
1547
+ with tm.assert_produces_warning(UserWarning):
1548
+ result = B.merge(A, left_on="Y", right_on="X")
1549
+ tm.assert_frame_equal(result, expected[["Y", "X"]])
1550
+
1551
+ # test no warning if float has NaNs
1552
+ B = DataFrame({"Y": [np.nan, np.nan, 3.0]})
1553
+
1554
+ with tm.assert_produces_warning(None):
1555
+ result = B.merge(A, left_on="Y", right_on="X")
1556
+ tm.assert_frame_equal(result, expected[["Y", "X"]])
1557
+
1558
+ def test_merge_incompat_infer_boolean_object(self):
1559
+ # GH21119: bool + object bool merge OK
1560
+ df1 = DataFrame({"key": Series([True, False], dtype=object)})
1561
+ df2 = DataFrame({"key": [True, False]})
1562
+
1563
+ expected = DataFrame({"key": [True, False]}, dtype=object)
1564
+ result = merge(df1, df2, on="key")
1565
+ tm.assert_frame_equal(result, expected)
1566
+ result = merge(df2, df1, on="key")
1567
+ tm.assert_frame_equal(result, expected)
1568
+
1569
+ def test_merge_incompat_infer_boolean_object_with_missing(self):
1570
+ # GH21119: bool + object bool merge OK
1571
+ # with missing value
1572
+ df1 = DataFrame({"key": Series([True, False, np.nan], dtype=object)})
1573
+ df2 = DataFrame({"key": [True, False]})
1574
+
1575
+ expected = DataFrame({"key": [True, False]}, dtype=object)
1576
+ result = merge(df1, df2, on="key")
1577
+ tm.assert_frame_equal(result, expected)
1578
+ result = merge(df2, df1, on="key")
1579
+ tm.assert_frame_equal(result, expected)
1580
+
1581
+ @pytest.mark.parametrize(
1582
+ "df1_vals, df2_vals",
1583
+ [
1584
+ # merge on category coerces to object
1585
+ ([0, 1, 2], Series(["a", "b", "a"]).astype("category")),
1586
+ ([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")),
1587
+ # no not infer
1588
+ ([0, 1], Series([False, True], dtype=object)),
1589
+ ([0, 1], Series([False, True], dtype=bool)),
1590
+ ],
1591
+ )
1592
+ def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals):
1593
+ # these are explicitly allowed incompat merges, that pass thru
1594
+ # the result type is dependent on if the values on the rhs are
1595
+ # inferred, otherwise these will be coerced to object
1596
+
1597
+ df1 = DataFrame({"A": df1_vals})
1598
+ df2 = DataFrame({"A": df2_vals})
1599
+
1600
+ result = merge(df1, df2, on=["A"])
1601
+ assert is_object_dtype(result.A.dtype)
1602
+ result = merge(df2, df1, on=["A"])
1603
+ assert is_object_dtype(result.A.dtype)
1604
+
1605
+ @pytest.mark.parametrize(
1606
+ "df1_vals, df2_vals",
1607
+ [
1608
+ # do not infer to numeric
1609
+ (Series([1, 2], dtype="uint64"), ["a", "b", "c"]),
1610
+ (Series([1, 2], dtype="int32"), ["a", "b", "c"]),
1611
+ ([0, 1, 2], ["0", "1", "2"]),
1612
+ ([0.0, 1.0, 2.0], ["0", "1", "2"]),
1613
+ ([0, 1, 2], ["0", "1", "2"]),
1614
+ (
1615
+ pd.date_range("1/1/2011", periods=2, freq="D"),
1616
+ ["2011-01-01", "2011-01-02"],
1617
+ ),
1618
+ (pd.date_range("1/1/2011", periods=2, freq="D"), [0, 1]),
1619
+ (pd.date_range("1/1/2011", periods=2, freq="D"), [0.0, 1.0]),
1620
+ (
1621
+ pd.date_range("20130101", periods=3),
1622
+ pd.date_range("20130101", periods=3, tz="US/Eastern"),
1623
+ ),
1624
+ ],
1625
+ )
1626
+ def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals):
1627
+ # GH 9780, GH 15800
1628
+ # Raise a ValueError when a user tries to merge on
1629
+ # dtypes that are incompatible (e.g., obj and int/float)
1630
+
1631
+ df1 = DataFrame({"A": df1_vals})
1632
+ df2 = DataFrame({"A": df2_vals})
1633
+
1634
+ msg = (
1635
+ f"You are trying to merge on {df1['A'].dtype} and "
1636
+ f"{df2['A'].dtype} columns. If you wish to proceed "
1637
+ "you should use pd.concat"
1638
+ )
1639
+ msg = re.escape(msg)
1640
+ with pytest.raises(ValueError, match=msg):
1641
+ merge(df1, df2, on=["A"])
1642
+
1643
+ # Check that error still raised when swapping order of dataframes
1644
+ msg = (
1645
+ f"You are trying to merge on {df2['A'].dtype} and "
1646
+ f"{df1['A'].dtype} columns. If you wish to proceed "
1647
+ "you should use pd.concat"
1648
+ )
1649
+ msg = re.escape(msg)
1650
+ with pytest.raises(ValueError, match=msg):
1651
+ merge(df2, df1, on=["A"])
1652
+
1653
+ @pytest.mark.parametrize(
1654
+ "expected_data, how",
1655
+ [
1656
+ ([1, 2], "outer"),
1657
+ ([], "inner"),
1658
+ ([2], "right"),
1659
+ ([1], "left"),
1660
+ ],
1661
+ )
1662
+ def test_merge_EA_dtype(self, any_numeric_ea_dtype, how, expected_data):
1663
+ # GH#40073
1664
+ d1 = DataFrame([(1,)], columns=["id"], dtype=any_numeric_ea_dtype)
1665
+ d2 = DataFrame([(2,)], columns=["id"], dtype=any_numeric_ea_dtype)
1666
+ result = merge(d1, d2, how=how)
1667
+ exp_index = RangeIndex(len(expected_data))
1668
+ expected = DataFrame(
1669
+ expected_data, index=exp_index, columns=["id"], dtype=any_numeric_ea_dtype
1670
+ )
1671
+ tm.assert_frame_equal(result, expected)
1672
+
1673
+ @pytest.mark.parametrize(
1674
+ "expected_data, how",
1675
+ [
1676
+ (["a", "b"], "outer"),
1677
+ ([], "inner"),
1678
+ (["b"], "right"),
1679
+ (["a"], "left"),
1680
+ ],
1681
+ )
1682
+ def test_merge_string_dtype(self, how, expected_data, any_string_dtype):
1683
+ # GH#40073
1684
+ d1 = DataFrame([("a",)], columns=["id"], dtype=any_string_dtype)
1685
+ d2 = DataFrame([("b",)], columns=["id"], dtype=any_string_dtype)
1686
+ result = merge(d1, d2, how=how)
1687
+ exp_idx = RangeIndex(len(expected_data))
1688
+ expected = DataFrame(
1689
+ expected_data, index=exp_idx, columns=["id"], dtype=any_string_dtype
1690
+ )
1691
+ tm.assert_frame_equal(result, expected)
1692
+
1693
+ @pytest.mark.parametrize(
1694
+ "how, expected_data",
1695
+ [
1696
+ ("inner", [[True, 1, 4], [False, 5, 3]]),
1697
+ ("outer", [[True, 1, 4], [False, 5, 3]]),
1698
+ ("left", [[True, 1, 4], [False, 5, 3]]),
1699
+ ("right", [[False, 5, 3], [True, 1, 4]]),
1700
+ ],
1701
+ )
1702
+ def test_merge_bool_dtype(self, how, expected_data):
1703
+ # GH#40073
1704
+ df1 = DataFrame({"A": [True, False], "B": [1, 5]})
1705
+ df2 = DataFrame({"A": [False, True], "C": [3, 4]})
1706
+ result = merge(df1, df2, how=how)
1707
+ expected = DataFrame(expected_data, columns=["A", "B", "C"])
1708
+ tm.assert_frame_equal(result, expected)
1709
+
1710
+ def test_merge_ea_with_string(self, join_type, string_dtype):
1711
+ # GH 43734 Avoid the use of `assign` with multi-index
1712
+ df1 = DataFrame(
1713
+ data={
1714
+ ("lvl0", "lvl1-a"): ["1", "2", "3", "4", None],
1715
+ ("lvl0", "lvl1-b"): ["4", "5", "6", "7", "8"],
1716
+ },
1717
+ dtype=pd.StringDtype(),
1718
+ )
1719
+ df1_copy = df1.copy()
1720
+ df2 = DataFrame(
1721
+ data={
1722
+ ("lvl0", "lvl1-a"): ["1", "2", "3", pd.NA, "5"],
1723
+ ("lvl0", "lvl1-c"): ["7", "8", "9", pd.NA, "11"],
1724
+ },
1725
+ dtype=string_dtype,
1726
+ )
1727
+ df2_copy = df2.copy()
1728
+ merged = merge(left=df1, right=df2, on=[("lvl0", "lvl1-a")], how=join_type)
1729
+
1730
+ # No change in df1 and df2
1731
+ tm.assert_frame_equal(df1, df1_copy)
1732
+ tm.assert_frame_equal(df2, df2_copy)
1733
+
1734
+ # Check the expected types for the merged data frame
1735
+ expected = Series(
1736
+ [np.dtype("O"), pd.StringDtype(), np.dtype("O")],
1737
+ index=MultiIndex.from_tuples(
1738
+ [("lvl0", "lvl1-a"), ("lvl0", "lvl1-b"), ("lvl0", "lvl1-c")]
1739
+ ),
1740
+ )
1741
+ tm.assert_series_equal(merged.dtypes, expected)
1742
+
1743
+ @pytest.mark.parametrize(
1744
+ "left_empty, how, exp",
1745
+ [
1746
+ (False, "left", "left"),
1747
+ (False, "right", "empty"),
1748
+ (False, "inner", "empty"),
1749
+ (False, "outer", "left"),
1750
+ (False, "cross", "empty_cross"),
1751
+ (True, "left", "empty"),
1752
+ (True, "right", "right"),
1753
+ (True, "inner", "empty"),
1754
+ (True, "outer", "right"),
1755
+ (True, "cross", "empty_cross"),
1756
+ ],
1757
+ )
1758
+ def test_merge_empty(self, left_empty, how, exp):
1759
+ left = DataFrame({"A": [2, 1], "B": [3, 4]})
1760
+ right = DataFrame({"A": [1], "C": [5]}, dtype="int64")
1761
+
1762
+ if left_empty:
1763
+ left = left.head(0)
1764
+ else:
1765
+ right = right.head(0)
1766
+
1767
+ result = left.merge(right, how=how)
1768
+
1769
+ if exp == "left":
1770
+ expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]})
1771
+ elif exp == "right":
1772
+ expected = DataFrame({"B": [np.nan], "A": [1], "C": [5]})
1773
+ elif exp == "empty":
1774
+ expected = DataFrame(columns=["A", "B", "C"], dtype="int64")
1775
+ if left_empty:
1776
+ expected = expected[["B", "A", "C"]]
1777
+ elif exp == "empty_cross":
1778
+ expected = DataFrame(columns=["A_x", "B", "A_y", "C"], dtype="int64")
1779
+
1780
+ tm.assert_frame_equal(result, expected)
1781
+
1782
+
1783
+ @pytest.fixture
1784
+ def left():
1785
+ np.random.seed(1234)
1786
+ return DataFrame(
1787
+ {
1788
+ "X": Series(np.random.choice(["foo", "bar"], size=(10,))).astype(
1789
+ CDT(["foo", "bar"])
1790
+ ),
1791
+ "Y": np.random.choice(["one", "two", "three"], size=(10,)),
1792
+ }
1793
+ )
1794
+
1795
+
1796
+ @pytest.fixture
1797
+ def right():
1798
+ np.random.seed(1234)
1799
+ return DataFrame(
1800
+ {"X": Series(["foo", "bar"]).astype(CDT(["foo", "bar"])), "Z": [1, 2]}
1801
+ )
1802
+
1803
+
1804
+ class TestMergeCategorical:
1805
+ def test_identical(self, left):
1806
+ # merging on the same, should preserve dtypes
1807
+ merged = merge(left, left, on="X")
1808
+ result = merged.dtypes.sort_index()
1809
+ expected = Series(
1810
+ [CategoricalDtype(categories=["foo", "bar"]), np.dtype("O"), np.dtype("O")],
1811
+ index=["X", "Y_x", "Y_y"],
1812
+ )
1813
+ tm.assert_series_equal(result, expected)
1814
+
1815
+ def test_basic(self, left, right):
1816
+ # we have matching Categorical dtypes in X
1817
+ # so should preserve the merged column
1818
+ merged = merge(left, right, on="X")
1819
+ result = merged.dtypes.sort_index()
1820
+ expected = Series(
1821
+ [
1822
+ CategoricalDtype(categories=["foo", "bar"]),
1823
+ np.dtype("O"),
1824
+ np.dtype("int64"),
1825
+ ],
1826
+ index=["X", "Y", "Z"],
1827
+ )
1828
+ tm.assert_series_equal(result, expected)
1829
+
1830
+ def test_merge_categorical(self):
1831
+ # GH 9426
1832
+
1833
+ right = DataFrame(
1834
+ {
1835
+ "c": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"},
1836
+ "d": {0: "null", 1: "null", 2: "null", 3: "null", 4: "null"},
1837
+ }
1838
+ )
1839
+ left = DataFrame(
1840
+ {
1841
+ "a": {0: "f", 1: "f", 2: "f", 3: "f", 4: "f"},
1842
+ "b": {0: "g", 1: "g", 2: "g", 3: "g", 4: "g"},
1843
+ }
1844
+ )
1845
+ df = merge(left, right, how="left", left_on="b", right_on="c")
1846
+
1847
+ # object-object
1848
+ expected = df.copy()
1849
+
1850
+ # object-cat
1851
+ # note that we propagate the category
1852
+ # because we don't have any matching rows
1853
+ cright = right.copy()
1854
+ cright["d"] = cright["d"].astype("category")
1855
+ result = merge(left, cright, how="left", left_on="b", right_on="c")
1856
+ expected["d"] = expected["d"].astype(CategoricalDtype(["null"]))
1857
+ tm.assert_frame_equal(result, expected)
1858
+
1859
+ # cat-object
1860
+ cleft = left.copy()
1861
+ cleft["b"] = cleft["b"].astype("category")
1862
+ result = merge(cleft, cright, how="left", left_on="b", right_on="c")
1863
+ tm.assert_frame_equal(result, expected)
1864
+
1865
+ # cat-cat
1866
+ cright = right.copy()
1867
+ cright["d"] = cright["d"].astype("category")
1868
+ cleft = left.copy()
1869
+ cleft["b"] = cleft["b"].astype("category")
1870
+ result = merge(cleft, cright, how="left", left_on="b", right_on="c")
1871
+ tm.assert_frame_equal(result, expected)
1872
+
1873
+ def tests_merge_categorical_unordered_equal(self):
1874
+ # GH-19551
1875
+ df1 = DataFrame(
1876
+ {
1877
+ "Foo": Categorical(["A", "B", "C"], categories=["A", "B", "C"]),
1878
+ "Left": ["A0", "B0", "C0"],
1879
+ }
1880
+ )
1881
+
1882
+ df2 = DataFrame(
1883
+ {
1884
+ "Foo": Categorical(["C", "B", "A"], categories=["C", "B", "A"]),
1885
+ "Right": ["C1", "B1", "A1"],
1886
+ }
1887
+ )
1888
+ result = merge(df1, df2, on=["Foo"])
1889
+ expected = DataFrame(
1890
+ {
1891
+ "Foo": Categorical(["A", "B", "C"]),
1892
+ "Left": ["A0", "B0", "C0"],
1893
+ "Right": ["A1", "B1", "C1"],
1894
+ }
1895
+ )
1896
+ tm.assert_frame_equal(result, expected)
1897
+
1898
+ @pytest.mark.parametrize("ordered", [True, False])
1899
+ def test_multiindex_merge_with_unordered_categoricalindex(self, ordered):
1900
+ # GH 36973
1901
+ pcat = CategoricalDtype(categories=["P2", "P1"], ordered=ordered)
1902
+ df1 = DataFrame(
1903
+ {
1904
+ "id": ["C", "C", "D"],
1905
+ "p": Categorical(["P2", "P1", "P2"], dtype=pcat),
1906
+ "a": [0, 1, 2],
1907
+ }
1908
+ ).set_index(["id", "p"])
1909
+ df2 = DataFrame(
1910
+ {
1911
+ "id": ["A", "C", "C"],
1912
+ "p": Categorical(["P2", "P2", "P1"], dtype=pcat),
1913
+ "d1": [10, 11, 12],
1914
+ }
1915
+ ).set_index(["id", "p"])
1916
+ result = merge(df1, df2, how="left", left_index=True, right_index=True)
1917
+ expected = DataFrame(
1918
+ {
1919
+ "id": ["C", "C", "D"],
1920
+ "p": Categorical(["P2", "P1", "P2"], dtype=pcat),
1921
+ "a": [0, 1, 2],
1922
+ "d1": [11.0, 12.0, np.nan],
1923
+ }
1924
+ ).set_index(["id", "p"])
1925
+ tm.assert_frame_equal(result, expected)
1926
+
1927
+ def test_other_columns(self, left, right):
1928
+ # non-merge columns should preserve if possible
1929
+ right = right.assign(Z=right.Z.astype("category"))
1930
+
1931
+ merged = merge(left, right, on="X")
1932
+ result = merged.dtypes.sort_index()
1933
+ expected = Series(
1934
+ [
1935
+ CategoricalDtype(categories=["foo", "bar"]),
1936
+ np.dtype("O"),
1937
+ CategoricalDtype(categories=[1, 2]),
1938
+ ],
1939
+ index=["X", "Y", "Z"],
1940
+ )
1941
+ tm.assert_series_equal(result, expected)
1942
+
1943
+ # categories are preserved
1944
+ assert left.X.values._categories_match_up_to_permutation(merged.X.values)
1945
+ assert right.Z.values._categories_match_up_to_permutation(merged.Z.values)
1946
+
1947
+ @pytest.mark.parametrize(
1948
+ "change",
1949
+ [
1950
+ lambda x: x,
1951
+ lambda x: x.astype(CDT(["foo", "bar", "bah"])),
1952
+ lambda x: x.astype(CDT(ordered=True)),
1953
+ ],
1954
+ )
1955
+ def test_dtype_on_merged_different(self, change, join_type, left, right):
1956
+ # our merging columns, X now has 2 different dtypes
1957
+ # so we must be object as a result
1958
+
1959
+ X = change(right.X.astype("object"))
1960
+ right = right.assign(X=X)
1961
+ assert is_categorical_dtype(left.X.values.dtype)
1962
+ # assert not left.X.values._categories_match_up_to_permutation(right.X.values)
1963
+
1964
+ merged = merge(left, right, on="X", how=join_type)
1965
+
1966
+ result = merged.dtypes.sort_index()
1967
+ expected = Series(
1968
+ [np.dtype("O"), np.dtype("O"), np.dtype("int64")], index=["X", "Y", "Z"]
1969
+ )
1970
+ tm.assert_series_equal(result, expected)
1971
+
1972
+ def test_self_join_multiple_categories(self):
1973
+ # GH 16767
1974
+ # non-duplicates should work with multiple categories
1975
+ m = 5
1976
+ df = DataFrame(
1977
+ {
1978
+ "a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m,
1979
+ "b": ["t", "w", "x", "y", "z"] * 2 * m,
1980
+ "c": [
1981
+ letter
1982
+ for each in ["m", "n", "u", "p", "o"]
1983
+ for letter in [each] * 2 * m
1984
+ ],
1985
+ "d": [
1986
+ letter
1987
+ for each in [
1988
+ "aa",
1989
+ "bb",
1990
+ "cc",
1991
+ "dd",
1992
+ "ee",
1993
+ "ff",
1994
+ "gg",
1995
+ "hh",
1996
+ "ii",
1997
+ "jj",
1998
+ ]
1999
+ for letter in [each] * m
2000
+ ],
2001
+ }
2002
+ )
2003
+
2004
+ # change them all to categorical variables
2005
+ df = df.apply(lambda x: x.astype("category"))
2006
+
2007
+ # self-join should equal ourselves
2008
+ result = merge(df, df, on=list(df.columns))
2009
+
2010
+ tm.assert_frame_equal(result, df)
2011
+
2012
+ def test_dtype_on_categorical_dates(self):
2013
+ # GH 16900
2014
+ # dates should not be coerced to ints
2015
+
2016
+ df = DataFrame(
2017
+ [[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"]
2018
+ )
2019
+ df["date"] = df["date"].astype("category")
2020
+
2021
+ df2 = DataFrame(
2022
+ [[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"]
2023
+ )
2024
+ df2["date"] = df2["date"].astype("category")
2025
+
2026
+ expected_outer = DataFrame(
2027
+ [
2028
+ [pd.Timestamp("2001-01-01").date(), 1.1, 1.3],
2029
+ [pd.Timestamp("2001-01-02").date(), 1.3, np.nan],
2030
+ [pd.Timestamp("2001-01-03").date(), np.nan, 1.4],
2031
+ ],
2032
+ columns=["date", "num2", "num4"],
2033
+ )
2034
+ result_outer = merge(df, df2, how="outer", on=["date"])
2035
+ tm.assert_frame_equal(result_outer, expected_outer)
2036
+
2037
+ expected_inner = DataFrame(
2038
+ [[pd.Timestamp("2001-01-01").date(), 1.1, 1.3]],
2039
+ columns=["date", "num2", "num4"],
2040
+ )
2041
+ result_inner = merge(df, df2, how="inner", on=["date"])
2042
+ tm.assert_frame_equal(result_inner, expected_inner)
2043
+
2044
+ @pytest.mark.parametrize("ordered", [True, False])
2045
+ @pytest.mark.parametrize(
2046
+ "category_column,categories,expected_categories",
2047
+ [
2048
+ ([False, True, True, False], [True, False], [True, False]),
2049
+ ([2, 1, 1, 2], [1, 2], [1, 2]),
2050
+ (["False", "True", "True", "False"], ["True", "False"], ["True", "False"]),
2051
+ ],
2052
+ )
2053
+ def test_merging_with_bool_or_int_cateorical_column(
2054
+ self, category_column, categories, expected_categories, ordered
2055
+ ):
2056
+ # GH 17187
2057
+ # merging with a boolean/int categorical column
2058
+ df1 = DataFrame({"id": [1, 2, 3, 4], "cat": category_column})
2059
+ df1["cat"] = df1["cat"].astype(CDT(categories, ordered=ordered))
2060
+ df2 = DataFrame({"id": [2, 4], "num": [1, 9]})
2061
+ result = df1.merge(df2)
2062
+ expected = DataFrame({"id": [2, 4], "cat": expected_categories, "num": [1, 9]})
2063
+ expected["cat"] = expected["cat"].astype(CDT(categories, ordered=ordered))
2064
+ tm.assert_frame_equal(expected, result)
2065
+
2066
+ def test_merge_on_int_array(self):
2067
+ # GH 23020
2068
+ df = DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1})
2069
+ result = merge(df, df, on="A")
2070
+ expected = DataFrame(
2071
+ {"A": Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1}
2072
+ )
2073
+ tm.assert_frame_equal(result, expected)
2074
+
2075
+
2076
+ @pytest.fixture
2077
+ def left_df():
2078
+ return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
2079
+
2080
+
2081
+ @pytest.fixture
2082
+ def right_df():
2083
+ return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
2084
+
2085
+
2086
+ class TestMergeOnIndexes:
2087
+ @pytest.mark.parametrize(
2088
+ "how, sort, expected",
2089
+ [
2090
+ ("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])),
2091
+ ("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])),
2092
+ (
2093
+ "left",
2094
+ False,
2095
+ DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]),
2096
+ ),
2097
+ (
2098
+ "left",
2099
+ True,
2100
+ DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]),
2101
+ ),
2102
+ (
2103
+ "right",
2104
+ False,
2105
+ DataFrame(
2106
+ {"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]
2107
+ ),
2108
+ ),
2109
+ (
2110
+ "right",
2111
+ True,
2112
+ DataFrame(
2113
+ {"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]
2114
+ ),
2115
+ ),
2116
+ (
2117
+ "outer",
2118
+ False,
2119
+ DataFrame(
2120
+ {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
2121
+ index=[0, 1, 2, 3],
2122
+ ),
2123
+ ),
2124
+ (
2125
+ "outer",
2126
+ True,
2127
+ DataFrame(
2128
+ {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
2129
+ index=[0, 1, 2, 3],
2130
+ ),
2131
+ ),
2132
+ ],
2133
+ )
2134
+ def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
2135
+ result = merge(
2136
+ left_df, right_df, left_index=True, right_index=True, how=how, sort=sort
2137
+ )
2138
+ tm.assert_frame_equal(result, expected)
2139
+
2140
+
2141
+ @pytest.mark.parametrize(
2142
+ "index",
2143
+ [Index([1, 2], dtype=dtyp, name="index_col") for dtyp in tm.ALL_REAL_NUMPY_DTYPES]
2144
+ + [
2145
+ CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"),
2146
+ RangeIndex(start=0, stop=2, name="index_col"),
2147
+ DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"),
2148
+ ],
2149
+ ids=lambda x: f"{type(x).__name__}[{x.dtype}]",
2150
+ )
2151
+ def test_merge_index_types(index):
2152
+ # gh-20777
2153
+ # assert key access is consistent across index types
2154
+ left = DataFrame({"left_data": [1, 2]}, index=index)
2155
+ right = DataFrame({"right_data": [1.0, 2.0]}, index=index)
2156
+
2157
+ result = left.merge(right, on=["index_col"])
2158
+
2159
+ expected = DataFrame({"left_data": [1, 2], "right_data": [1.0, 2.0]}, index=index)
2160
+ tm.assert_frame_equal(result, expected)
2161
+
2162
+
2163
+ @pytest.mark.parametrize(
2164
+ "on,left_on,right_on,left_index,right_index,nm",
2165
+ [
2166
+ (["outer", "inner"], None, None, False, False, "B"),
2167
+ (None, None, None, True, True, "B"),
2168
+ (None, ["outer", "inner"], None, False, True, "B"),
2169
+ (None, None, ["outer", "inner"], True, False, "B"),
2170
+ (["outer", "inner"], None, None, False, False, None),
2171
+ (None, None, None, True, True, None),
2172
+ (None, ["outer", "inner"], None, False, True, None),
2173
+ (None, None, ["outer", "inner"], True, False, None),
2174
+ ],
2175
+ )
2176
+ def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
2177
+ # GH 21220
2178
+ a = DataFrame(
2179
+ {"A": [1, 2, 3, 4]},
2180
+ index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]),
2181
+ )
2182
+ b = Series(
2183
+ [1, 2, 3, 4],
2184
+ index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]),
2185
+ name=nm,
2186
+ )
2187
+ expected = DataFrame(
2188
+ {"A": [2, 4], "B": [1, 3]},
2189
+ index=MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]),
2190
+ )
2191
+ if nm is not None:
2192
+ result = merge(
2193
+ a,
2194
+ b,
2195
+ on=on,
2196
+ left_on=left_on,
2197
+ right_on=right_on,
2198
+ left_index=left_index,
2199
+ right_index=right_index,
2200
+ )
2201
+ tm.assert_frame_equal(result, expected)
2202
+ else:
2203
+ msg = "Cannot merge a Series without a name"
2204
+ with pytest.raises(ValueError, match=msg):
2205
+ result = merge(
2206
+ a,
2207
+ b,
2208
+ on=on,
2209
+ left_on=left_on,
2210
+ right_on=right_on,
2211
+ left_index=left_index,
2212
+ right_index=right_index,
2213
+ )
2214
+
2215
+
2216
+ def test_merge_series_multilevel():
2217
+ # GH#47946
2218
+ # GH 40993: For raising, enforced in 2.0
2219
+ a = DataFrame(
2220
+ {"A": [1, 2, 3, 4]},
2221
+ index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]),
2222
+ )
2223
+ b = Series(
2224
+ [1, 2, 3, 4],
2225
+ index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]),
2226
+ name=("B", "C"),
2227
+ )
2228
+ with pytest.raises(
2229
+ MergeError, match="Not allowed to merge between different levels"
2230
+ ):
2231
+ merge(a, b, on=["outer", "inner"])
2232
+
2233
+
2234
+ @pytest.mark.parametrize(
2235
+ "col1, col2, kwargs, expected_cols",
2236
+ [
2237
+ (0, 0, {"suffixes": ("", "_dup")}, ["0", "0_dup"]),
2238
+ (0, 0, {"suffixes": (None, "_dup")}, [0, "0_dup"]),
2239
+ (0, 0, {"suffixes": ("_x", "_y")}, ["0_x", "0_y"]),
2240
+ (0, 0, {"suffixes": ["_x", "_y"]}, ["0_x", "0_y"]),
2241
+ ("a", 0, {"suffixes": (None, "_y")}, ["a", 0]),
2242
+ (0.0, 0.0, {"suffixes": ("_x", None)}, ["0.0_x", 0.0]),
2243
+ ("b", "b", {"suffixes": (None, "_y")}, ["b", "b_y"]),
2244
+ ("a", "a", {"suffixes": ("_x", None)}, ["a_x", "a"]),
2245
+ ("a", "b", {"suffixes": ("_x", None)}, ["a", "b"]),
2246
+ ("a", "a", {"suffixes": (None, "_x")}, ["a", "a_x"]),
2247
+ (0, 0, {"suffixes": ("_a", None)}, ["0_a", 0]),
2248
+ ("a", "a", {}, ["a_x", "a_y"]),
2249
+ (0, 0, {}, ["0_x", "0_y"]),
2250
+ ],
2251
+ )
2252
+ def test_merge_suffix(col1, col2, kwargs, expected_cols):
2253
+ # issue: 24782
2254
+ a = DataFrame({col1: [1, 2, 3]})
2255
+ b = DataFrame({col2: [4, 5, 6]})
2256
+
2257
+ expected = DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols)
2258
+
2259
+ result = a.merge(b, left_index=True, right_index=True, **kwargs)
2260
+ tm.assert_frame_equal(result, expected)
2261
+
2262
+ result = merge(a, b, left_index=True, right_index=True, **kwargs)
2263
+ tm.assert_frame_equal(result, expected)
2264
+
2265
+
2266
+ @pytest.mark.parametrize(
2267
+ "how,expected",
2268
+ [
2269
+ (
2270
+ "right",
2271
+ DataFrame(
2272
+ {"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]}
2273
+ ),
2274
+ ),
2275
+ (
2276
+ "outer",
2277
+ DataFrame(
2278
+ {
2279
+ "A": [100, 200, 1, 300],
2280
+ "B1": [60, 70, 80, np.nan],
2281
+ "B2": [600, 700, np.nan, 800],
2282
+ }
2283
+ ),
2284
+ ),
2285
+ ],
2286
+ )
2287
+ def test_merge_duplicate_suffix(how, expected):
2288
+ left_df = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]})
2289
+ right_df = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]})
2290
+ result = merge(left_df, right_df, on="A", how=how, suffixes=("_x", "_x"))
2291
+ expected.columns = ["A", "B_x", "B_x"]
2292
+
2293
+ tm.assert_frame_equal(result, expected)
2294
+
2295
+
2296
+ @pytest.mark.parametrize(
2297
+ "col1, col2, suffixes",
2298
+ [("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))],
2299
+ )
2300
+ def test_merge_suffix_error(col1, col2, suffixes):
2301
+ # issue: 24782
2302
+ a = DataFrame({col1: [1, 2, 3]})
2303
+ b = DataFrame({col2: [3, 4, 5]})
2304
+
2305
+ # TODO: might reconsider current raise behaviour, see issue 24782
2306
+ msg = "columns overlap but no suffix specified"
2307
+ with pytest.raises(ValueError, match=msg):
2308
+ merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
2309
+
2310
+
2311
+ @pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}])
2312
+ def test_merge_suffix_raises(suffixes):
2313
+ a = DataFrame({"a": [1, 2, 3]})
2314
+ b = DataFrame({"b": [3, 4, 5]})
2315
+
2316
+ with pytest.raises(TypeError, match="Passing 'suffixes' as a"):
2317
+ merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
2318
+
2319
+
2320
+ @pytest.mark.parametrize(
2321
+ "col1, col2, suffixes, msg",
2322
+ [
2323
+ ("a", "a", ("a", "b", "c"), r"too many values to unpack \(expected 2\)"),
2324
+ ("a", "a", tuple("a"), r"not enough values to unpack \(expected 2, got 1\)"),
2325
+ ],
2326
+ )
2327
+ def test_merge_suffix_length_error(col1, col2, suffixes, msg):
2328
+ a = DataFrame({col1: [1, 2, 3]})
2329
+ b = DataFrame({col2: [3, 4, 5]})
2330
+
2331
+ with pytest.raises(ValueError, match=msg):
2332
+ merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
2333
+
2334
+
2335
+ @pytest.mark.parametrize("cat_dtype", ["one", "two"])
2336
+ @pytest.mark.parametrize("reverse", [True, False])
2337
+ def test_merge_equal_cat_dtypes(cat_dtype, reverse):
2338
+ # see gh-22501
2339
+ cat_dtypes = {
2340
+ "one": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
2341
+ "two": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
2342
+ }
2343
+
2344
+ df1 = DataFrame(
2345
+ {"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]}
2346
+ ).set_index("foo")
2347
+
2348
+ data_foo = ["a", "b", "c"]
2349
+ data_right = [1, 2, 3]
2350
+
2351
+ if reverse:
2352
+ data_foo.reverse()
2353
+ data_right.reverse()
2354
+
2355
+ df2 = DataFrame(
2356
+ {"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right}
2357
+ ).set_index("foo")
2358
+
2359
+ result = df1.merge(df2, left_index=True, right_index=True)
2360
+
2361
+ expected = DataFrame(
2362
+ {
2363
+ "left": [1, 2, 3],
2364
+ "right": [1, 2, 3],
2365
+ "foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]),
2366
+ }
2367
+ ).set_index("foo")
2368
+
2369
+ tm.assert_frame_equal(result, expected)
2370
+
2371
+
2372
+ def test_merge_equal_cat_dtypes2():
2373
+ # see gh-22501
2374
+ cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False)
2375
+
2376
+ # Test Data
2377
+ df1 = DataFrame(
2378
+ {"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]}
2379
+ ).set_index("foo")
2380
+
2381
+ df2 = DataFrame(
2382
+ {"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]}
2383
+ ).set_index("foo")
2384
+
2385
+ result = df1.merge(df2, left_index=True, right_index=True)
2386
+
2387
+ expected = DataFrame(
2388
+ {"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)}
2389
+ ).set_index("foo")
2390
+
2391
+ tm.assert_frame_equal(result, expected)
2392
+
2393
+
2394
+ def test_merge_on_cat_and_ext_array():
2395
+ # GH 28668
2396
+ right = DataFrame(
2397
+ {"a": Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")}
2398
+ )
2399
+ left = right.copy()
2400
+ left["a"] = left["a"].astype("category")
2401
+
2402
+ result = merge(left, right, how="inner", on="a")
2403
+ expected = right.copy()
2404
+
2405
+ tm.assert_frame_equal(result, expected)
2406
+
2407
+
2408
+ def test_merge_multiindex_columns():
2409
+ # Issue #28518
2410
+ # Verify that merging two dataframes give the expected labels
2411
+ # The original cause of this issue come from a bug lexsort_depth and is tested in
2412
+ # test_lexsort_depth
2413
+
2414
+ letters = ["a", "b", "c", "d"]
2415
+ numbers = ["1", "2", "3"]
2416
+ index = MultiIndex.from_product((letters, numbers), names=["outer", "inner"])
2417
+
2418
+ frame_x = DataFrame(columns=index)
2419
+ frame_x["id"] = ""
2420
+ frame_y = DataFrame(columns=index)
2421
+ frame_y["id"] = ""
2422
+
2423
+ l_suf = "_x"
2424
+ r_suf = "_y"
2425
+ result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf)))
2426
+
2427
+ # Constructing the expected results
2428
+ expected_labels = [letter + l_suf for letter in letters] + [
2429
+ letter + r_suf for letter in letters
2430
+ ]
2431
+ expected_index = MultiIndex.from_product(
2432
+ [expected_labels, numbers], names=["outer", "inner"]
2433
+ )
2434
+ expected = DataFrame(columns=expected_index)
2435
+ expected["id"] = ""
2436
+
2437
+ tm.assert_frame_equal(result, expected)
2438
+
2439
+
2440
+ def test_merge_datetime_upcast_dtype():
2441
+ # https://github.com/pandas-dev/pandas/issues/31208
2442
+ df1 = DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]})
2443
+ df2 = DataFrame(
2444
+ {"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])}
2445
+ )
2446
+ result = merge(df1, df2, how="left", on="y")
2447
+ expected = DataFrame(
2448
+ {
2449
+ "x": ["a", "b", "c"],
2450
+ "y": ["1", "2", "4"],
2451
+ "z": pd.to_datetime(["2000", "2001", "NaT"]),
2452
+ }
2453
+ )
2454
+ tm.assert_frame_equal(result, expected)
2455
+
2456
+
2457
+ @pytest.mark.parametrize("n_categories", [5, 128])
2458
+ def test_categorical_non_unique_monotonic(n_categories):
2459
+ # GH 28189
2460
+ # With n_categories as 5, we test the int8 case is hit in libjoin,
2461
+ # with n_categories as 128 we test the int16 case.
2462
+ left_index = CategoricalIndex([0] + list(range(n_categories)))
2463
+ df1 = DataFrame(range(n_categories + 1), columns=["value"], index=left_index)
2464
+ df2 = DataFrame(
2465
+ [[6]],
2466
+ columns=["value"],
2467
+ index=CategoricalIndex([0], categories=list(range(n_categories))),
2468
+ )
2469
+
2470
+ result = merge(df1, df2, how="left", left_index=True, right_index=True)
2471
+ expected = DataFrame(
2472
+ [[i, 6.0] if i < 2 else [i, np.nan] for i in range(n_categories + 1)],
2473
+ columns=["value_x", "value_y"],
2474
+ index=left_index,
2475
+ )
2476
+ tm.assert_frame_equal(expected, result)
2477
+
2478
+
2479
+ def test_merge_join_categorical_multiindex():
2480
+ # From issue 16627
2481
+ a = {
2482
+ "Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]),
2483
+ "Int1": [0, 1, 0, 1, 0, 0],
2484
+ }
2485
+ a = DataFrame(a)
2486
+
2487
+ b = {
2488
+ "Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]),
2489
+ "Int": [0, 0, 0, 1, 1, 1],
2490
+ "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
2491
+ }
2492
+ b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
2493
+
2494
+ expected = merge(
2495
+ a,
2496
+ b.reset_index(),
2497
+ left_on=["Cat1", "Int1"],
2498
+ right_on=["Cat", "Int"],
2499
+ how="left",
2500
+ )
2501
+ expected = expected.drop(["Cat", "Int"], axis=1)
2502
+ result = a.join(b, on=["Cat1", "Int1"])
2503
+ tm.assert_frame_equal(expected, result)
2504
+
2505
+ # Same test, but with ordered categorical
2506
+ a = {
2507
+ "Cat1": Categorical(
2508
+ ["a", "b", "a", "c", "a", "b"], ["b", "a", "c"], ordered=True
2509
+ ),
2510
+ "Int1": [0, 1, 0, 1, 0, 0],
2511
+ }
2512
+ a = DataFrame(a)
2513
+
2514
+ b = {
2515
+ "Cat": Categorical(
2516
+ ["a", "b", "c", "a", "b", "c"], ["b", "a", "c"], ordered=True
2517
+ ),
2518
+ "Int": [0, 0, 0, 1, 1, 1],
2519
+ "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
2520
+ }
2521
+ b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
2522
+
2523
+ expected = merge(
2524
+ a,
2525
+ b.reset_index(),
2526
+ left_on=["Cat1", "Int1"],
2527
+ right_on=["Cat", "Int"],
2528
+ how="left",
2529
+ )
2530
+ expected = expected.drop(["Cat", "Int"], axis=1)
2531
+ result = a.join(b, on=["Cat1", "Int1"])
2532
+ tm.assert_frame_equal(expected, result)
2533
+
2534
+
2535
+ @pytest.mark.parametrize("func", ["merge", "merge_asof"])
2536
+ @pytest.mark.parametrize(
2537
+ ("kwargs", "err_msg"),
2538
+ [
2539
+ ({"left_on": "a", "left_index": True}, ["left_on", "left_index"]),
2540
+ ({"right_on": "a", "right_index": True}, ["right_on", "right_index"]),
2541
+ ],
2542
+ )
2543
+ def test_merge_join_cols_error_reporting_duplicates(func, kwargs, err_msg):
2544
+ # GH: 16228
2545
+ left = DataFrame({"a": [1, 2], "b": [3, 4]})
2546
+ right = DataFrame({"a": [1, 1], "c": [5, 6]})
2547
+ msg = rf'Can only pass argument "{err_msg[0]}" OR "{err_msg[1]}" not both\.'
2548
+ with pytest.raises(MergeError, match=msg):
2549
+ getattr(pd, func)(left, right, **kwargs)
2550
+
2551
+
2552
+ @pytest.mark.parametrize("func", ["merge", "merge_asof"])
2553
+ @pytest.mark.parametrize(
2554
+ ("kwargs", "err_msg"),
2555
+ [
2556
+ ({"left_on": "a"}, ["right_on", "right_index"]),
2557
+ ({"right_on": "a"}, ["left_on", "left_index"]),
2558
+ ],
2559
+ )
2560
+ def test_merge_join_cols_error_reporting_missing(func, kwargs, err_msg):
2561
+ # GH: 16228
2562
+ left = DataFrame({"a": [1, 2], "b": [3, 4]})
2563
+ right = DataFrame({"a": [1, 1], "c": [5, 6]})
2564
+ msg = rf'Must pass "{err_msg[0]}" OR "{err_msg[1]}"\.'
2565
+ with pytest.raises(MergeError, match=msg):
2566
+ getattr(pd, func)(left, right, **kwargs)
2567
+
2568
+
2569
+ @pytest.mark.parametrize("func", ["merge", "merge_asof"])
2570
+ @pytest.mark.parametrize(
2571
+ "kwargs",
2572
+ [
2573
+ {"right_index": True},
2574
+ {"left_index": True},
2575
+ ],
2576
+ )
2577
+ def test_merge_join_cols_error_reporting_on_and_index(func, kwargs):
2578
+ # GH: 16228
2579
+ left = DataFrame({"a": [1, 2], "b": [3, 4]})
2580
+ right = DataFrame({"a": [1, 1], "c": [5, 6]})
2581
+ msg = (
2582
+ r'Can only pass argument "on" OR "left_index" '
2583
+ r'and "right_index", not a combination of both\.'
2584
+ )
2585
+ with pytest.raises(MergeError, match=msg):
2586
+ getattr(pd, func)(left, right, on="a", **kwargs)
2587
+
2588
+
2589
+ def test_merge_right_left_index():
2590
+ # GH#38616
2591
+ left = DataFrame({"x": [1, 1], "z": ["foo", "foo"]})
2592
+ right = DataFrame({"x": [1, 1], "z": ["foo", "foo"]})
2593
+ result = merge(left, right, how="right", left_index=True, right_on="x")
2594
+ expected = DataFrame(
2595
+ {
2596
+ "x": [1, 1],
2597
+ "x_x": [1, 1],
2598
+ "z_x": ["foo", "foo"],
2599
+ "x_y": [1, 1],
2600
+ "z_y": ["foo", "foo"],
2601
+ }
2602
+ )
2603
+ tm.assert_frame_equal(result, expected)
2604
+
2605
+
2606
+ def test_merge_result_empty_index_and_on():
2607
+ # GH#33814
2608
+ df1 = DataFrame({"a": [1], "b": [2]}).set_index(["a", "b"])
2609
+ df2 = DataFrame({"b": [1]}).set_index(["b"])
2610
+ expected = DataFrame({"a": [], "b": []}, dtype=np.int64).set_index(["a", "b"])
2611
+ result = merge(df1, df2, left_on=["b"], right_index=True)
2612
+ tm.assert_frame_equal(result, expected)
2613
+
2614
+ result = merge(df2, df1, left_index=True, right_on=["b"])
2615
+ tm.assert_frame_equal(result, expected)
2616
+
2617
+
2618
+ def test_merge_suffixes_produce_dup_columns_raises():
2619
+ # GH#22818; Enforced in 2.0
2620
+ left = DataFrame({"a": [1, 2, 3], "b": 1, "b_x": 2})
2621
+ right = DataFrame({"a": [1, 2, 3], "b": 2})
2622
+
2623
+ with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"):
2624
+ merge(left, right, on="a")
2625
+
2626
+ with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"):
2627
+ merge(right, left, on="a", suffixes=("_y", "_x"))
2628
+
2629
+
2630
+ def test_merge_duplicate_columns_with_suffix_no_warning():
2631
+ # GH#22818
2632
+ # Do not raise warning when duplicates are caused by duplicates in origin
2633
+ left = DataFrame([[1, 1, 1], [2, 2, 2]], columns=["a", "b", "b"])
2634
+ right = DataFrame({"a": [1, 3], "b": 2})
2635
+ result = merge(left, right, on="a")
2636
+ expected = DataFrame([[1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_y"])
2637
+ tm.assert_frame_equal(result, expected)
2638
+
2639
+
2640
+ def test_merge_duplicate_columns_with_suffix_causing_another_duplicate_raises():
2641
+ # GH#22818, Enforced in 2.0
2642
+ # This should raise warning because suffixes cause another collision
2643
+ left = DataFrame([[1, 1, 1, 1], [2, 2, 2, 2]], columns=["a", "b", "b", "b_x"])
2644
+ right = DataFrame({"a": [1, 3], "b": 2})
2645
+ with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"):
2646
+ merge(left, right, on="a")
2647
+
2648
+
2649
+ def test_merge_string_float_column_result():
2650
+ # GH 13353
2651
+ df1 = DataFrame([[1, 2], [3, 4]], columns=Index(["a", 114.0]))
2652
+ df2 = DataFrame([[9, 10], [11, 12]], columns=["x", "y"])
2653
+ result = merge(df2, df1, how="inner", left_index=True, right_index=True)
2654
+ expected = DataFrame(
2655
+ [[9, 10, 1, 2], [11, 12, 3, 4]], columns=Index(["x", "y", "a", 114.0])
2656
+ )
2657
+ tm.assert_frame_equal(result, expected)
2658
+
2659
+
2660
+ def test_mergeerror_on_left_index_mismatched_dtypes():
2661
+ # GH 22449
2662
+ df_1 = DataFrame(data=["X"], columns=["C"], index=[22])
2663
+ df_2 = DataFrame(data=["X"], columns=["C"], index=[999])
2664
+ with pytest.raises(MergeError, match="Can only pass argument"):
2665
+ merge(df_1, df_2, on=["C"], left_index=True)
2666
+
2667
+
2668
+ def test_merge_on_left_categoricalindex():
2669
+ # GH#48464 don't raise when left_on is a CategoricalIndex
2670
+ ci = CategoricalIndex(range(3))
2671
+
2672
+ right = DataFrame({"A": ci, "B": range(3)})
2673
+ left = DataFrame({"C": range(3, 6)})
2674
+
2675
+ res = merge(left, right, left_on=ci, right_on="A")
2676
+ expected = merge(left, right, left_on=ci._data, right_on="A")
2677
+ tm.assert_frame_equal(res, expected)
2678
+
2679
+
2680
+ @pytest.mark.parametrize("dtype", [None, "Int64"])
2681
+ def test_merge_outer_with_NaN(dtype):
2682
+ # GH#43550
2683
+ left = DataFrame({"key": [1, 2], "col1": [1, 2]}, dtype=dtype)
2684
+ right = DataFrame({"key": [np.nan, np.nan], "col2": [3, 4]}, dtype=dtype)
2685
+ result = merge(left, right, on="key", how="outer")
2686
+ expected = DataFrame(
2687
+ {
2688
+ "key": [1, 2, np.nan, np.nan],
2689
+ "col1": [1, 2, np.nan, np.nan],
2690
+ "col2": [np.nan, np.nan, 3, 4],
2691
+ },
2692
+ dtype=dtype,
2693
+ )
2694
+ tm.assert_frame_equal(result, expected)
2695
+
2696
+ # switch left and right
2697
+ result = merge(right, left, on="key", how="outer")
2698
+ expected = DataFrame(
2699
+ {
2700
+ "key": [np.nan, np.nan, 1, 2],
2701
+ "col2": [3, 4, np.nan, np.nan],
2702
+ "col1": [np.nan, np.nan, 1, 2],
2703
+ },
2704
+ dtype=dtype,
2705
+ )
2706
+ tm.assert_frame_equal(result, expected)
2707
+
2708
+
2709
+ def test_merge_different_index_names():
2710
+ # GH#45094
2711
+ left = DataFrame({"a": [1]}, index=Index([1], name="c"))
2712
+ right = DataFrame({"a": [1]}, index=Index([1], name="d"))
2713
+ result = merge(left, right, left_on="c", right_on="d")
2714
+ expected = DataFrame({"a_x": [1], "a_y": 1})
2715
+ tm.assert_frame_equal(result, expected)
2716
+
2717
+
2718
+ def test_merge_ea(any_numeric_ea_dtype, join_type):
2719
+ # GH#44240
2720
+ left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype)
2721
+ right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype)
2722
+ result = left.merge(right, how=join_type)
2723
+ expected = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}, dtype=any_numeric_ea_dtype)
2724
+ tm.assert_frame_equal(result, expected)
2725
+
2726
+
2727
+ def test_merge_ea_and_non_ea(any_numeric_ea_dtype, join_type):
2728
+ # GH#44240
2729
+ left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype)
2730
+ right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype.lower())
2731
+ result = left.merge(right, how=join_type)
2732
+ expected = DataFrame(
2733
+ {
2734
+ "a": Series([1, 2, 3], dtype=any_numeric_ea_dtype),
2735
+ "b": Series([1, 1, 1], dtype=any_numeric_ea_dtype),
2736
+ "c": Series([2, 2, 2], dtype=any_numeric_ea_dtype.lower()),
2737
+ }
2738
+ )
2739
+ tm.assert_frame_equal(result, expected)
2740
+
2741
+
2742
+ @pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"])
2743
+ def test_merge_arrow_and_numpy_dtypes(dtype):
2744
+ # GH#52406
2745
+ pytest.importorskip("pyarrow")
2746
+ df = DataFrame({"a": [1, 2]}, dtype=dtype)
2747
+ df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]")
2748
+ result = df.merge(df2)
2749
+ expected = df.copy()
2750
+ tm.assert_frame_equal(result, expected)
2751
+
2752
+ result = df2.merge(df)
2753
+ expected = df2.copy()
2754
+ tm.assert_frame_equal(result, expected)
2755
+
2756
+
2757
+ @pytest.mark.parametrize("how", ["inner", "left", "outer", "right"])
2758
+ @pytest.mark.parametrize("tz", [None, "America/Chicago"])
2759
+ def test_merge_datetime_different_resolution(tz, how):
2760
+ # https://github.com/pandas-dev/pandas/issues/53200
2761
+ vals = [
2762
+ pd.Timestamp(2023, 5, 12, tz=tz),
2763
+ pd.Timestamp(2023, 5, 13, tz=tz),
2764
+ pd.Timestamp(2023, 5, 14, tz=tz),
2765
+ ]
2766
+ df1 = DataFrame({"t": vals[:2], "a": [1.0, 2.0]})
2767
+ df1["t"] = df1["t"].dt.as_unit("ns")
2768
+ df2 = DataFrame({"t": vals[1:], "b": [1.0, 2.0]})
2769
+ df2["t"] = df2["t"].dt.as_unit("s")
2770
+
2771
+ expected = DataFrame({"t": vals, "a": [1.0, 2.0, np.nan], "b": [np.nan, 1.0, 2.0]})
2772
+ expected["t"] = expected["t"].dt.as_unit("ns")
2773
+ if how == "inner":
2774
+ expected = expected.iloc[[1]].reset_index(drop=True)
2775
+ elif how == "left":
2776
+ expected = expected.iloc[[0, 1]]
2777
+ elif how == "right":
2778
+ expected = expected.iloc[[1, 2]].reset_index(drop=True)
2779
+
2780
+ result = df1.merge(df2, on="t", how=how)
2781
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py ADDED
@@ -0,0 +1,1591 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+
3
+ import numpy as np
4
+ import pytest
5
+ import pytz
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ Index,
10
+ Timedelta,
11
+ merge_asof,
12
+ read_csv,
13
+ to_datetime,
14
+ )
15
+ import pandas._testing as tm
16
+ from pandas.core.reshape.merge import MergeError
17
+
18
+
19
@pytest.fixture(params=["s", "ms", "us", "ns"])
def unit(request):
    """Datetimelike resolution (unit) to parametrize tests over."""
    return request.param
25
+
26
+
27
+ class TestAsOfMerge:
28
+ def read_data(self, datapath, name, dedupe=False):
29
+ path = datapath("reshape", "merge", "data", name)
30
+ x = read_csv(path)
31
+ if dedupe:
32
+ x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
33
+ drop=True
34
+ )
35
+ x.time = to_datetime(x.time)
36
+ return x
37
+
38
+ @pytest.fixture
39
+ def trades(self, datapath):
40
+ return self.read_data(datapath, "trades.csv")
41
+
42
+ @pytest.fixture
43
+ def quotes(self, datapath):
44
+ return self.read_data(datapath, "quotes.csv", dedupe=True)
45
+
46
+ @pytest.fixture
47
+ def asof(self, datapath):
48
+ return self.read_data(datapath, "asof.csv")
49
+
50
+ @pytest.fixture
51
+ def tolerance(self, datapath):
52
+ return self.read_data(datapath, "tolerance.csv")
53
+
54
+ @pytest.fixture
55
+ def allow_exact_matches(self, datapath):
56
+ return self.read_data(datapath, "allow_exact_matches.csv")
57
+
58
+ @pytest.fixture
59
+ def allow_exact_matches_and_tolerance(self, datapath):
60
+ return self.read_data(datapath, "allow_exact_matches_and_tolerance.csv")
61
+
62
+ def test_examples1(self):
63
+ """doc-string examples"""
64
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
65
+ right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
66
+
67
+ expected = pd.DataFrame(
68
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
69
+ )
70
+
71
+ result = merge_asof(left, right, on="a")
72
+ tm.assert_frame_equal(result, expected)
73
+
74
+ def test_examples2(self, unit):
75
+ """doc-string examples"""
76
+ if unit == "s":
77
+ pytest.skip(
78
+ "This test is invalid for unit='s' because that would "
79
+ "round the trades['time']]"
80
+ )
81
+ trades = pd.DataFrame(
82
+ {
83
+ "time": to_datetime(
84
+ [
85
+ "20160525 13:30:00.023",
86
+ "20160525 13:30:00.038",
87
+ "20160525 13:30:00.048",
88
+ "20160525 13:30:00.048",
89
+ "20160525 13:30:00.048",
90
+ ]
91
+ ).astype(f"M8[{unit}]"),
92
+ "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
93
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
94
+ "quantity": [75, 155, 100, 100, 100],
95
+ },
96
+ columns=["time", "ticker", "price", "quantity"],
97
+ )
98
+
99
+ quotes = pd.DataFrame(
100
+ {
101
+ "time": to_datetime(
102
+ [
103
+ "20160525 13:30:00.023",
104
+ "20160525 13:30:00.023",
105
+ "20160525 13:30:00.030",
106
+ "20160525 13:30:00.041",
107
+ "20160525 13:30:00.048",
108
+ "20160525 13:30:00.049",
109
+ "20160525 13:30:00.072",
110
+ "20160525 13:30:00.075",
111
+ ]
112
+ ).astype(f"M8[{unit}]"),
113
+ "ticker": [
114
+ "GOOG",
115
+ "MSFT",
116
+ "MSFT",
117
+ "MSFT",
118
+ "GOOG",
119
+ "AAPL",
120
+ "GOOG",
121
+ "MSFT",
122
+ ],
123
+ "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
124
+ "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
125
+ },
126
+ columns=["time", "ticker", "bid", "ask"],
127
+ )
128
+
129
+ merge_asof(trades, quotes, on="time", by="ticker")
130
+
131
+ merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms"))
132
+
133
+ expected = pd.DataFrame(
134
+ {
135
+ "time": to_datetime(
136
+ [
137
+ "20160525 13:30:00.023",
138
+ "20160525 13:30:00.038",
139
+ "20160525 13:30:00.048",
140
+ "20160525 13:30:00.048",
141
+ "20160525 13:30:00.048",
142
+ ]
143
+ ).astype(f"M8[{unit}]"),
144
+ "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
145
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
146
+ "quantity": [75, 155, 100, 100, 100],
147
+ "bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
148
+ "ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
149
+ },
150
+ columns=["time", "ticker", "price", "quantity", "bid", "ask"],
151
+ )
152
+
153
+ result = merge_asof(
154
+ trades,
155
+ quotes,
156
+ on="time",
157
+ by="ticker",
158
+ tolerance=Timedelta("10ms"),
159
+ allow_exact_matches=False,
160
+ )
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+ def test_examples3(self):
164
+ """doc-string examples"""
165
+ # GH14887
166
+
167
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
168
+ right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
169
+
170
+ expected = pd.DataFrame(
171
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
172
+ )
173
+
174
+ result = merge_asof(left, right, on="a", direction="forward")
175
+ tm.assert_frame_equal(result, expected)
176
+
177
+ def test_examples4(self):
178
+ """doc-string examples"""
179
+ # GH14887
180
+
181
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
182
+ right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
183
+
184
+ expected = pd.DataFrame(
185
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
186
+ )
187
+
188
+ result = merge_asof(left, right, on="a", direction="nearest")
189
+ tm.assert_frame_equal(result, expected)
190
+
191
+ def test_basic(self, trades, asof, quotes):
192
+ expected = asof
193
+
194
+ result = merge_asof(trades, quotes, on="time", by="ticker")
195
+ tm.assert_frame_equal(result, expected)
196
+
197
+ def test_basic_categorical(self, trades, asof, quotes):
198
+ expected = asof
199
+ trades.ticker = trades.ticker.astype("category")
200
+ quotes.ticker = quotes.ticker.astype("category")
201
+ expected.ticker = expected.ticker.astype("category")
202
+
203
+ result = merge_asof(trades, quotes, on="time", by="ticker")
204
+ tm.assert_frame_equal(result, expected)
205
+
206
+ def test_basic_left_index(self, trades, asof, quotes):
207
+ # GH14253
208
+ expected = asof
209
+ trades = trades.set_index("time")
210
+
211
+ result = merge_asof(
212
+ trades, quotes, left_index=True, right_on="time", by="ticker"
213
+ )
214
+ # left-only index uses right"s index, oddly
215
+ expected.index = result.index
216
+ # time column appears after left"s columns
217
+ expected = expected[result.columns]
218
+ tm.assert_frame_equal(result, expected)
219
+
220
+ def test_basic_right_index(self, trades, asof, quotes):
221
+ expected = asof
222
+ quotes = quotes.set_index("time")
223
+
224
+ result = merge_asof(
225
+ trades, quotes, left_on="time", right_index=True, by="ticker"
226
+ )
227
+ tm.assert_frame_equal(result, expected)
228
+
229
+ def test_basic_left_index_right_index(self, trades, asof, quotes):
230
+ expected = asof.set_index("time")
231
+ trades = trades.set_index("time")
232
+ quotes = quotes.set_index("time")
233
+
234
+ result = merge_asof(
235
+ trades, quotes, left_index=True, right_index=True, by="ticker"
236
+ )
237
+ tm.assert_frame_equal(result, expected)
238
+
239
+ def test_multi_index_left(self, trades, quotes):
240
+ # MultiIndex is prohibited
241
+ trades = trades.set_index(["time", "price"])
242
+ quotes = quotes.set_index("time")
243
+ with pytest.raises(MergeError, match="left can only have one index"):
244
+ merge_asof(trades, quotes, left_index=True, right_index=True)
245
+
246
+ def test_multi_index_right(self, trades, quotes):
247
+ # MultiIndex is prohibited
248
+ trades = trades.set_index("time")
249
+ quotes = quotes.set_index(["time", "bid"])
250
+ with pytest.raises(MergeError, match="right can only have one index"):
251
+ merge_asof(trades, quotes, left_index=True, right_index=True)
252
+
253
+ def test_on_and_index_left_on(self, trades, quotes):
254
+ # "on" parameter and index together is prohibited
255
+ trades = trades.set_index("time")
256
+ quotes = quotes.set_index("time")
257
+ msg = 'Can only pass argument "left_on" OR "left_index" not both.'
258
+ with pytest.raises(MergeError, match=msg):
259
+ merge_asof(
260
+ trades, quotes, left_on="price", left_index=True, right_index=True
261
+ )
262
+
263
+ def test_on_and_index_right_on(self, trades, quotes):
264
+ trades = trades.set_index("time")
265
+ quotes = quotes.set_index("time")
266
+ msg = 'Can only pass argument "right_on" OR "right_index" not both.'
267
+ with pytest.raises(MergeError, match=msg):
268
+ merge_asof(
269
+ trades, quotes, right_on="bid", left_index=True, right_index=True
270
+ )
271
+
272
+ def test_basic_left_by_right_by(self, trades, asof, quotes):
273
+ # GH14253
274
+ expected = asof
275
+
276
+ result = merge_asof(
277
+ trades, quotes, on="time", left_by="ticker", right_by="ticker"
278
+ )
279
+ tm.assert_frame_equal(result, expected)
280
+
281
+ def test_missing_right_by(self, trades, asof, quotes):
282
+ expected = asof
283
+
284
+ q = quotes[quotes.ticker != "MSFT"]
285
+ result = merge_asof(trades, q, on="time", by="ticker")
286
+ expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
287
+ tm.assert_frame_equal(result, expected)
288
+
289
+ def test_multiby(self):
290
+ # GH13936
291
+ trades = pd.DataFrame(
292
+ {
293
+ "time": to_datetime(
294
+ [
295
+ "20160525 13:30:00.023",
296
+ "20160525 13:30:00.023",
297
+ "20160525 13:30:00.046",
298
+ "20160525 13:30:00.048",
299
+ "20160525 13:30:00.050",
300
+ ]
301
+ ),
302
+ "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
303
+ "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
304
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
305
+ "quantity": [75, 155, 100, 100, 100],
306
+ },
307
+ columns=["time", "ticker", "exch", "price", "quantity"],
308
+ )
309
+
310
+ quotes = pd.DataFrame(
311
+ {
312
+ "time": to_datetime(
313
+ [
314
+ "20160525 13:30:00.023",
315
+ "20160525 13:30:00.023",
316
+ "20160525 13:30:00.030",
317
+ "20160525 13:30:00.041",
318
+ "20160525 13:30:00.045",
319
+ "20160525 13:30:00.049",
320
+ ]
321
+ ),
322
+ "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
323
+ "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
324
+ "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
325
+ "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
326
+ },
327
+ columns=["time", "ticker", "exch", "bid", "ask"],
328
+ )
329
+
330
+ expected = pd.DataFrame(
331
+ {
332
+ "time": to_datetime(
333
+ [
334
+ "20160525 13:30:00.023",
335
+ "20160525 13:30:00.023",
336
+ "20160525 13:30:00.046",
337
+ "20160525 13:30:00.048",
338
+ "20160525 13:30:00.050",
339
+ ]
340
+ ),
341
+ "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
342
+ "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
343
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
344
+ "quantity": [75, 155, 100, 100, 100],
345
+ "bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
346
+ "ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
347
+ },
348
+ columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
349
+ )
350
+
351
+ result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
352
+ tm.assert_frame_equal(result, expected)
353
+
354
+ def test_multiby_heterogeneous_types(self):
355
+ # GH13936
356
+ trades = pd.DataFrame(
357
+ {
358
+ "time": to_datetime(
359
+ [
360
+ "20160525 13:30:00.023",
361
+ "20160525 13:30:00.023",
362
+ "20160525 13:30:00.046",
363
+ "20160525 13:30:00.048",
364
+ "20160525 13:30:00.050",
365
+ ]
366
+ ),
367
+ "ticker": [0, 0, 1, 1, 2],
368
+ "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
369
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
370
+ "quantity": [75, 155, 100, 100, 100],
371
+ },
372
+ columns=["time", "ticker", "exch", "price", "quantity"],
373
+ )
374
+
375
+ quotes = pd.DataFrame(
376
+ {
377
+ "time": to_datetime(
378
+ [
379
+ "20160525 13:30:00.023",
380
+ "20160525 13:30:00.023",
381
+ "20160525 13:30:00.030",
382
+ "20160525 13:30:00.041",
383
+ "20160525 13:30:00.045",
384
+ "20160525 13:30:00.049",
385
+ ]
386
+ ),
387
+ "ticker": [1, 0, 0, 0, 1, 2],
388
+ "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
389
+ "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
390
+ "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
391
+ },
392
+ columns=["time", "ticker", "exch", "bid", "ask"],
393
+ )
394
+
395
+ expected = pd.DataFrame(
396
+ {
397
+ "time": to_datetime(
398
+ [
399
+ "20160525 13:30:00.023",
400
+ "20160525 13:30:00.023",
401
+ "20160525 13:30:00.046",
402
+ "20160525 13:30:00.048",
403
+ "20160525 13:30:00.050",
404
+ ]
405
+ ),
406
+ "ticker": [0, 0, 1, 1, 2],
407
+ "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
408
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
409
+ "quantity": [75, 155, 100, 100, 100],
410
+ "bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
411
+ "ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
412
+ },
413
+ columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
414
+ )
415
+
416
+ result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
417
+ tm.assert_frame_equal(result, expected)
418
+
419
+ def test_multiby_indexed(self):
420
+ # GH15676
421
+ left = pd.DataFrame(
422
+ [
423
+ [to_datetime("20160602"), 1, "a"],
424
+ [to_datetime("20160602"), 2, "a"],
425
+ [to_datetime("20160603"), 1, "b"],
426
+ [to_datetime("20160603"), 2, "b"],
427
+ ],
428
+ columns=["time", "k1", "k2"],
429
+ ).set_index("time")
430
+
431
+ right = pd.DataFrame(
432
+ [
433
+ [to_datetime("20160502"), 1, "a", 1.0],
434
+ [to_datetime("20160502"), 2, "a", 2.0],
435
+ [to_datetime("20160503"), 1, "b", 3.0],
436
+ [to_datetime("20160503"), 2, "b", 4.0],
437
+ ],
438
+ columns=["time", "k1", "k2", "value"],
439
+ ).set_index("time")
440
+
441
+ expected = pd.DataFrame(
442
+ [
443
+ [to_datetime("20160602"), 1, "a", 1.0],
444
+ [to_datetime("20160602"), 2, "a", 2.0],
445
+ [to_datetime("20160603"), 1, "b", 3.0],
446
+ [to_datetime("20160603"), 2, "b", 4.0],
447
+ ],
448
+ columns=["time", "k1", "k2", "value"],
449
+ ).set_index("time")
450
+
451
+ result = merge_asof(
452
+ left, right, left_index=True, right_index=True, by=["k1", "k2"]
453
+ )
454
+
455
+ tm.assert_frame_equal(expected, result)
456
+
457
+ with pytest.raises(
458
+ MergeError, match="left_by and right_by must be same length"
459
+ ):
460
+ merge_asof(
461
+ left,
462
+ right,
463
+ left_index=True,
464
+ right_index=True,
465
+ left_by=["k1", "k2"],
466
+ right_by=["k1"],
467
+ )
468
+
469
+ def test_basic2(self, datapath):
470
+ expected = self.read_data(datapath, "asof2.csv")
471
+ trades = self.read_data(datapath, "trades2.csv")
472
+ quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
473
+
474
+ result = merge_asof(trades, quotes, on="time", by="ticker")
475
+ tm.assert_frame_equal(result, expected)
476
+
477
+ def test_basic_no_by(self, trades, asof, quotes):
478
+ f = (
479
+ lambda x: x[x.ticker == "MSFT"]
480
+ .drop("ticker", axis=1)
481
+ .reset_index(drop=True)
482
+ )
483
+
484
+ # just use a single ticker
485
+ expected = f(asof)
486
+ trades = f(trades)
487
+ quotes = f(quotes)
488
+
489
+ result = merge_asof(trades, quotes, on="time")
490
+ tm.assert_frame_equal(result, expected)
491
+
492
+ def test_valid_join_keys(self, trades, quotes):
493
+ msg = r"incompatible merge keys \[1\] .* must be the same type"
494
+
495
+ with pytest.raises(MergeError, match=msg):
496
+ merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
497
+
498
+ with pytest.raises(MergeError, match="can only asof on a key for left"):
499
+ merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
500
+
501
+ with pytest.raises(MergeError, match="can only asof on a key for left"):
502
+ merge_asof(trades, quotes, by="ticker")
503
+
504
+ def test_with_duplicates(self, datapath, trades, quotes):
505
+ q = (
506
+ pd.concat([quotes, quotes])
507
+ .sort_values(["time", "ticker"])
508
+ .reset_index(drop=True)
509
+ )
510
+ result = merge_asof(trades, q, on="time", by="ticker")
511
+ expected = self.read_data(datapath, "asof.csv")
512
+ tm.assert_frame_equal(result, expected)
513
+
514
+ def test_with_duplicates_no_on(self):
515
+ df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
516
+ df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
517
+ result = merge_asof(df1, df2, on="key")
518
+ expected = pd.DataFrame(
519
+ {"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
520
+ )
521
+ tm.assert_frame_equal(result, expected)
522
+
523
+ def test_valid_allow_exact_matches(self, trades, quotes):
524
+ msg = "allow_exact_matches must be boolean, passed foo"
525
+
526
+ with pytest.raises(MergeError, match=msg):
527
+ merge_asof(
528
+ trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
529
+ )
530
+
531
+ def test_valid_tolerance(self, trades, quotes):
532
+ # dti
533
+ merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
534
+
535
+ # integer
536
+ merge_asof(
537
+ trades.reset_index(),
538
+ quotes.reset_index(),
539
+ on="index",
540
+ by="ticker",
541
+ tolerance=1,
542
+ )
543
+
544
+ msg = r"incompatible tolerance .*, must be compat with type .*"
545
+
546
+ # incompat
547
+ with pytest.raises(MergeError, match=msg):
548
+ merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
549
+
550
+ # invalid
551
+ with pytest.raises(MergeError, match=msg):
552
+ merge_asof(
553
+ trades.reset_index(),
554
+ quotes.reset_index(),
555
+ on="index",
556
+ by="ticker",
557
+ tolerance=1.0,
558
+ )
559
+
560
+ msg = "tolerance must be positive"
561
+
562
+ # invalid negative
563
+ with pytest.raises(MergeError, match=msg):
564
+ merge_asof(
565
+ trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
566
+ )
567
+
568
+ with pytest.raises(MergeError, match=msg):
569
+ merge_asof(
570
+ trades.reset_index(),
571
+ quotes.reset_index(),
572
+ on="index",
573
+ by="ticker",
574
+ tolerance=-1,
575
+ )
576
+
577
+ def test_non_sorted(self, trades, quotes):
578
+ trades = trades.sort_values("time", ascending=False)
579
+ quotes = quotes.sort_values("time", ascending=False)
580
+
581
+ # we require that we are already sorted on time & quotes
582
+ assert not trades.time.is_monotonic_increasing
583
+ assert not quotes.time.is_monotonic_increasing
584
+ with pytest.raises(ValueError, match="left keys must be sorted"):
585
+ merge_asof(trades, quotes, on="time", by="ticker")
586
+
587
+ trades = trades.sort_values("time")
588
+ assert trades.time.is_monotonic_increasing
589
+ assert not quotes.time.is_monotonic_increasing
590
+ with pytest.raises(ValueError, match="right keys must be sorted"):
591
+ merge_asof(trades, quotes, on="time", by="ticker")
592
+
593
+ quotes = quotes.sort_values("time")
594
+ assert trades.time.is_monotonic_increasing
595
+ assert quotes.time.is_monotonic_increasing
596
+
597
+ # ok, though has dupes
598
+ merge_asof(trades, quotes, on="time", by="ticker")
599
+
600
+ @pytest.mark.parametrize(
601
+ "tolerance_ts",
602
+ [Timedelta("1day"), datetime.timedelta(days=1)],
603
+ ids=["Timedelta", "datetime.timedelta"],
604
+ )
605
+ def test_tolerance(self, tolerance_ts, trades, quotes, tolerance):
606
+ result = merge_asof(
607
+ trades, quotes, on="time", by="ticker", tolerance=tolerance_ts
608
+ )
609
+ expected = tolerance
610
+ tm.assert_frame_equal(result, expected)
611
+
612
+ def test_tolerance_forward(self):
613
+ # GH14887
614
+
615
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
616
+ right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
617
+
618
+ expected = pd.DataFrame(
619
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
620
+ )
621
+
622
+ result = merge_asof(left, right, on="a", direction="forward", tolerance=1)
623
+ tm.assert_frame_equal(result, expected)
624
+
625
+ def test_tolerance_nearest(self):
626
+ # GH14887
627
+
628
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
629
+ right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
630
+
631
+ expected = pd.DataFrame(
632
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
633
+ )
634
+
635
+ result = merge_asof(left, right, on="a", direction="nearest", tolerance=1)
636
+ tm.assert_frame_equal(result, expected)
637
+
638
+ def test_tolerance_tz(self, unit):
639
+ # GH 14844
640
+ left = pd.DataFrame(
641
+ {
642
+ "date": pd.date_range(
643
+ start=to_datetime("2016-01-02"),
644
+ freq="D",
645
+ periods=5,
646
+ tz=pytz.timezone("UTC"),
647
+ unit=unit,
648
+ ),
649
+ "value1": np.arange(5),
650
+ }
651
+ )
652
+ right = pd.DataFrame(
653
+ {
654
+ "date": pd.date_range(
655
+ start=to_datetime("2016-01-01"),
656
+ freq="D",
657
+ periods=5,
658
+ tz=pytz.timezone("UTC"),
659
+ unit=unit,
660
+ ),
661
+ "value2": list("ABCDE"),
662
+ }
663
+ )
664
+ result = merge_asof(left, right, on="date", tolerance=Timedelta("1 day"))
665
+
666
+ expected = pd.DataFrame(
667
+ {
668
+ "date": pd.date_range(
669
+ start=to_datetime("2016-01-02"),
670
+ freq="D",
671
+ periods=5,
672
+ tz=pytz.timezone("UTC"),
673
+ unit=unit,
674
+ ),
675
+ "value1": np.arange(5),
676
+ "value2": list("BCDEE"),
677
+ }
678
+ )
679
+ tm.assert_frame_equal(result, expected)
680
+
681
+ def test_tolerance_float(self):
682
+ # GH22981
683
+ left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
684
+ right = pd.DataFrame(
685
+ {"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
686
+ )
687
+
688
+ expected = pd.DataFrame(
689
+ {
690
+ "a": [1.1, 3.5, 10.9],
691
+ "left_val": ["a", "b", "c"],
692
+ "right_val": [1, 3.3, np.nan],
693
+ }
694
+ )
695
+
696
+ result = merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
697
+ tm.assert_frame_equal(result, expected)
698
+
699
+ def test_index_tolerance(self, trades, quotes, tolerance):
700
+ # GH 15135
701
+ expected = tolerance.set_index("time")
702
+ trades = trades.set_index("time")
703
+ quotes = quotes.set_index("time")
704
+
705
+ result = merge_asof(
706
+ trades,
707
+ quotes,
708
+ left_index=True,
709
+ right_index=True,
710
+ by="ticker",
711
+ tolerance=Timedelta("1day"),
712
+ )
713
+ tm.assert_frame_equal(result, expected)
714
+
715
+ def test_allow_exact_matches(self, trades, quotes, allow_exact_matches):
716
+ result = merge_asof(
717
+ trades, quotes, on="time", by="ticker", allow_exact_matches=False
718
+ )
719
+ expected = allow_exact_matches
720
+ tm.assert_frame_equal(result, expected)
721
+
722
+ def test_allow_exact_matches_forward(self):
723
+ # GH14887
724
+
725
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
726
+ right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
727
+
728
+ expected = pd.DataFrame(
729
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
730
+ )
731
+
732
+ result = merge_asof(
733
+ left, right, on="a", direction="forward", allow_exact_matches=False
734
+ )
735
+ tm.assert_frame_equal(result, expected)
736
+
737
+ def test_allow_exact_matches_nearest(self):
738
+ # GH14887
739
+
740
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
741
+ right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
742
+
743
+ expected = pd.DataFrame(
744
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
745
+ )
746
+
747
+ result = merge_asof(
748
+ left, right, on="a", direction="nearest", allow_exact_matches=False
749
+ )
750
+ tm.assert_frame_equal(result, expected)
751
+
752
+ def test_allow_exact_matches_and_tolerance(
753
+ self, trades, quotes, allow_exact_matches_and_tolerance
754
+ ):
755
+ result = merge_asof(
756
+ trades,
757
+ quotes,
758
+ on="time",
759
+ by="ticker",
760
+ tolerance=Timedelta("100ms"),
761
+ allow_exact_matches=False,
762
+ )
763
+ expected = allow_exact_matches_and_tolerance
764
+ tm.assert_frame_equal(result, expected)
765
+
766
+ def test_allow_exact_matches_and_tolerance2(self):
767
+ # GH 13695
768
+ df1 = pd.DataFrame(
769
+ {"time": to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
770
+ )
771
+ df2 = pd.DataFrame(
772
+ {
773
+ "time": to_datetime(
774
+ ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
775
+ ),
776
+ "version": [1, 2],
777
+ }
778
+ )
779
+
780
+ result = merge_asof(df1, df2, on="time")
781
+ expected = pd.DataFrame(
782
+ {
783
+ "time": to_datetime(["2016-07-15 13:30:00.030"]),
784
+ "username": ["bob"],
785
+ "version": [2],
786
+ }
787
+ )
788
+ tm.assert_frame_equal(result, expected)
789
+
790
+ result = merge_asof(df1, df2, on="time", allow_exact_matches=False)
791
+ expected = pd.DataFrame(
792
+ {
793
+ "time": to_datetime(["2016-07-15 13:30:00.030"]),
794
+ "username": ["bob"],
795
+ "version": [1],
796
+ }
797
+ )
798
+ tm.assert_frame_equal(result, expected)
799
+
800
+ result = merge_asof(
801
+ df1,
802
+ df2,
803
+ on="time",
804
+ allow_exact_matches=False,
805
+ tolerance=Timedelta("10ms"),
806
+ )
807
+ expected = pd.DataFrame(
808
+ {
809
+ "time": to_datetime(["2016-07-15 13:30:00.030"]),
810
+ "username": ["bob"],
811
+ "version": [np.nan],
812
+ }
813
+ )
814
+ tm.assert_frame_equal(result, expected)
815
+
816
+ def test_allow_exact_matches_and_tolerance3(self):
817
+ # GH 13709
818
+ df1 = pd.DataFrame(
819
+ {
820
+ "time": to_datetime(
821
+ ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
822
+ ),
823
+ "username": ["bob", "charlie"],
824
+ }
825
+ )
826
+ df2 = pd.DataFrame(
827
+ {
828
+ "time": to_datetime(
829
+ ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
830
+ ),
831
+ "version": [1, 2],
832
+ }
833
+ )
834
+
835
+ result = merge_asof(
836
+ df1,
837
+ df2,
838
+ on="time",
839
+ allow_exact_matches=False,
840
+ tolerance=Timedelta("10ms"),
841
+ )
842
+ expected = pd.DataFrame(
843
+ {
844
+ "time": to_datetime(
845
+ ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
846
+ ),
847
+ "username": ["bob", "charlie"],
848
+ "version": [np.nan, np.nan],
849
+ }
850
+ )
851
+ tm.assert_frame_equal(result, expected)
852
+
853
+ def test_allow_exact_matches_and_tolerance_forward(self):
854
+ # GH14887
855
+
856
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
857
+ right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
858
+
859
+ expected = pd.DataFrame(
860
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
861
+ )
862
+
863
+ result = merge_asof(
864
+ left,
865
+ right,
866
+ on="a",
867
+ direction="forward",
868
+ allow_exact_matches=False,
869
+ tolerance=1,
870
+ )
871
+ tm.assert_frame_equal(result, expected)
872
+
873
+ def test_allow_exact_matches_and_tolerance_nearest(self):
874
+ # GH14887
875
+
876
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
877
+ right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
878
+
879
+ expected = pd.DataFrame(
880
+ {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
881
+ )
882
+
883
+ result = merge_asof(
884
+ left,
885
+ right,
886
+ on="a",
887
+ direction="nearest",
888
+ allow_exact_matches=False,
889
+ tolerance=1,
890
+ )
891
+ tm.assert_frame_equal(result, expected)
892
+
893
+ def test_forward_by(self):
894
+ # GH14887
895
+
896
+ left = pd.DataFrame(
897
+ {
898
+ "a": [1, 5, 10, 12, 15],
899
+ "b": ["X", "X", "Y", "Z", "Y"],
900
+ "left_val": ["a", "b", "c", "d", "e"],
901
+ }
902
+ )
903
+ right = pd.DataFrame(
904
+ {
905
+ "a": [1, 6, 11, 15, 16],
906
+ "b": ["X", "Z", "Y", "Z", "Y"],
907
+ "right_val": [1, 6, 11, 15, 16],
908
+ }
909
+ )
910
+
911
+ expected = pd.DataFrame(
912
+ {
913
+ "a": [1, 5, 10, 12, 15],
914
+ "b": ["X", "X", "Y", "Z", "Y"],
915
+ "left_val": ["a", "b", "c", "d", "e"],
916
+ "right_val": [1, np.nan, 11, 15, 16],
917
+ }
918
+ )
919
+
920
+ result = merge_asof(left, right, on="a", by="b", direction="forward")
921
+ tm.assert_frame_equal(result, expected)
922
+
923
+ def test_nearest_by(self):
924
+ # GH14887
925
+
926
+ left = pd.DataFrame(
927
+ {
928
+ "a": [1, 5, 10, 12, 15],
929
+ "b": ["X", "X", "Z", "Z", "Y"],
930
+ "left_val": ["a", "b", "c", "d", "e"],
931
+ }
932
+ )
933
+ right = pd.DataFrame(
934
+ {
935
+ "a": [1, 6, 11, 15, 16],
936
+ "b": ["X", "Z", "Z", "Z", "Y"],
937
+ "right_val": [1, 6, 11, 15, 16],
938
+ }
939
+ )
940
+
941
+ expected = pd.DataFrame(
942
+ {
943
+ "a": [1, 5, 10, 12, 15],
944
+ "b": ["X", "X", "Z", "Z", "Y"],
945
+ "left_val": ["a", "b", "c", "d", "e"],
946
+ "right_val": [1, 1, 11, 11, 16],
947
+ }
948
+ )
949
+
950
+ result = merge_asof(left, right, on="a", by="b", direction="nearest")
951
+ tm.assert_frame_equal(result, expected)
952
+
953
+ def test_by_int(self):
954
+ # we specialize by type, so test that this is correct
955
+ df1 = pd.DataFrame(
956
+ {
957
+ "time": to_datetime(
958
+ [
959
+ "20160525 13:30:00.020",
960
+ "20160525 13:30:00.030",
961
+ "20160525 13:30:00.040",
962
+ "20160525 13:30:00.050",
963
+ "20160525 13:30:00.060",
964
+ ]
965
+ ),
966
+ "key": [1, 2, 1, 3, 2],
967
+ "value1": [1.1, 1.2, 1.3, 1.4, 1.5],
968
+ },
969
+ columns=["time", "key", "value1"],
970
+ )
971
+
972
+ df2 = pd.DataFrame(
973
+ {
974
+ "time": to_datetime(
975
+ [
976
+ "20160525 13:30:00.015",
977
+ "20160525 13:30:00.020",
978
+ "20160525 13:30:00.025",
979
+ "20160525 13:30:00.035",
980
+ "20160525 13:30:00.040",
981
+ "20160525 13:30:00.055",
982
+ "20160525 13:30:00.060",
983
+ "20160525 13:30:00.065",
984
+ ]
985
+ ),
986
+ "key": [2, 1, 1, 3, 2, 1, 2, 3],
987
+ "value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
988
+ },
989
+ columns=["time", "key", "value2"],
990
+ )
991
+
992
+ result = merge_asof(df1, df2, on="time", by="key")
993
+
994
+ expected = pd.DataFrame(
995
+ {
996
+ "time": to_datetime(
997
+ [
998
+ "20160525 13:30:00.020",
999
+ "20160525 13:30:00.030",
1000
+ "20160525 13:30:00.040",
1001
+ "20160525 13:30:00.050",
1002
+ "20160525 13:30:00.060",
1003
+ ]
1004
+ ),
1005
+ "key": [1, 2, 1, 3, 2],
1006
+ "value1": [1.1, 1.2, 1.3, 1.4, 1.5],
1007
+ "value2": [2.2, 2.1, 2.3, 2.4, 2.7],
1008
+ },
1009
+ columns=["time", "key", "value1", "value2"],
1010
+ )
1011
+
1012
+ tm.assert_frame_equal(result, expected)
1013
+
1014
+ def test_on_float(self):
1015
+ # mimics how to determine the minimum-price variation
1016
+ df1 = pd.DataFrame(
1017
+ {
1018
+ "price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
1019
+ "symbol": list("ABCDEFG"),
1020
+ },
1021
+ columns=["symbol", "price"],
1022
+ )
1023
+
1024
+ df2 = pd.DataFrame(
1025
+ {"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
1026
+ columns=["price", "mpv"],
1027
+ )
1028
+
1029
+ df1 = df1.sort_values("price").reset_index(drop=True)
1030
+
1031
+ result = merge_asof(df1, df2, on="price")
1032
+
1033
+ expected = pd.DataFrame(
1034
+ {
1035
+ "symbol": list("BGACEDF"),
1036
+ "price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
1037
+ "mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
1038
+ },
1039
+ columns=["symbol", "price", "mpv"],
1040
+ )
1041
+
1042
+ tm.assert_frame_equal(result, expected)
1043
+
1044
+ def test_on_specialized_type(self, any_real_numpy_dtype):
1045
+ # see gh-13936
1046
+ dtype = np.dtype(any_real_numpy_dtype).type
1047
+
1048
+ df1 = pd.DataFrame(
1049
+ {"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
1050
+ columns=["symbol", "value"],
1051
+ )
1052
+ df1.value = dtype(df1.value)
1053
+
1054
+ df2 = pd.DataFrame(
1055
+ {"value": [0, 80, 120, 125], "result": list("xyzw")},
1056
+ columns=["value", "result"],
1057
+ )
1058
+ df2.value = dtype(df2.value)
1059
+
1060
+ df1 = df1.sort_values("value").reset_index(drop=True)
1061
+ result = merge_asof(df1, df2, on="value")
1062
+
1063
+ expected = pd.DataFrame(
1064
+ {
1065
+ "symbol": list("BACEGDF"),
1066
+ "value": [2, 5, 25, 78, 79, 100, 120],
1067
+ "result": list("xxxxxyz"),
1068
+ },
1069
+ columns=["symbol", "value", "result"],
1070
+ )
1071
+ expected.value = dtype(expected.value)
1072
+
1073
+ tm.assert_frame_equal(result, expected)
1074
+
1075
    def test_on_specialized_type_by_int(self, any_real_numpy_dtype):
        # see gh-13936: like test_on_specialized_type, but additionally
        # grouping with an integer "by" key
        dtype = np.dtype(any_real_numpy_dtype).type

        df1 = pd.DataFrame(
            {
                "value": [5, 2, 25, 100, 78, 120, 79],
                "key": [1, 2, 3, 2, 3, 1, 2],
                "symbol": list("ABCDEFG"),
            },
            columns=["symbol", "key", "value"],
        )
        df1.value = dtype(df1.value)

        df2 = pd.DataFrame(
            {"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
            columns=["value", "key", "result"],
        )
        df2.value = dtype(df2.value)

        df1 = df1.sort_values("value").reset_index(drop=True)
        result = merge_asof(df1, df2, on="value", by="key")

        expected = pd.DataFrame(
            {
                "symbol": list("BACEGDF"),
                "key": [2, 1, 3, 3, 2, 2, 1],
                "value": [2, 5, 25, 78, 79, 100, 120],
                # rows with no prior match inside the same "by" group get NaN
                "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
            },
            columns=["symbol", "key", "value", "result"],
        )
        expected.value = dtype(expected.value)

        tm.assert_frame_equal(result, expected)
1110
+
1111
    def test_on_float_by_int(self):
        # type specialize both "by" and "on" parameters: float "on" key
        # combined with an integer "by" (exchange id) grouping
        df1 = pd.DataFrame(
            {
                "symbol": list("AAABBBCCC"),
                "exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
                "price": [
                    3.26,
                    3.2599,
                    3.2598,
                    12.58,
                    12.59,
                    12.5,
                    378.15,
                    378.2,
                    378.25,
                ],
            },
            columns=["symbol", "exch", "price"],
        )

        # per-exchange price thresholds -> minimum price variation (mpv)
        df2 = pd.DataFrame(
            {
                "exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
                "price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
                "mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
            },
            columns=["exch", "price", "mpv"],
        )

        # both sides must be sorted on the "on" key
        df1 = df1.sort_values("price").reset_index(drop=True)
        df2 = df2.sort_values("price").reset_index(drop=True)

        result = merge_asof(df1, df2, on="price", by="exch")

        expected = pd.DataFrame(
            {
                "symbol": list("AAABBBCCC"),
                "exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
                "price": [
                    3.2598,
                    3.2599,
                    3.26,
                    12.5,
                    12.58,
                    12.59,
                    378.15,
                    378.2,
                    378.25,
                ],
                "mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
            },
            columns=["symbol", "exch", "price", "mpv"],
        )

        tm.assert_frame_equal(result, expected)
1167
+
1168
+ def test_merge_datatype_error_raises(self):
1169
+ msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype"
1170
+
1171
+ left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
1172
+ right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
1173
+
1174
+ with pytest.raises(MergeError, match=msg):
1175
+ merge_asof(left, right, on="a")
1176
+
1177
+ def test_merge_datatype_categorical_error_raises(self):
1178
+ msg = (
1179
+ r"incompatible merge keys \[0\] .* both sides category, "
1180
+ "but not equal ones"
1181
+ )
1182
+
1183
+ left = pd.DataFrame(
1184
+ {"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
1185
+ )
1186
+ right = pd.DataFrame(
1187
+ {
1188
+ "right_val": [1, 2, 3, 6, 7],
1189
+ "a": pd.Categorical(["a", "X", "c", "X", "b"]),
1190
+ }
1191
+ )
1192
+
1193
+ with pytest.raises(MergeError, match=msg):
1194
+ merge_asof(left, right, on="a")
1195
+
1196
+ def test_merge_groupby_multiple_column_with_categorical_column(self):
1197
+ # GH 16454
1198
+ df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
1199
+ result = merge_asof(df, df, on="x", by=["y", "z"])
1200
+ expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
1201
+ tm.assert_frame_equal(result, expected)
1202
+
1203
+ @pytest.mark.parametrize(
1204
+ "func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"]
1205
+ )
1206
+ @pytest.mark.parametrize("side", ["left", "right"])
1207
+ def test_merge_on_nans(self, func, side):
1208
+ # GH 23189
1209
+ msg = f"Merge keys contain null values on {side} side"
1210
+ nulls = func([1.0, 5.0, np.nan])
1211
+ non_nulls = func([1.0, 5.0, 10.0])
1212
+ df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
1213
+ df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
1214
+
1215
+ with pytest.raises(ValueError, match=msg):
1216
+ if side == "left":
1217
+ merge_asof(df_null, df, on="a")
1218
+ else:
1219
+ merge_asof(df, df_null, on="a")
1220
+
1221
    def test_by_nullable(self, any_numeric_ea_dtype):
        # Note: this test passes if instead of using pd.array we use
        # np.array([np.nan, 1]). Other than that, I (@jbrockmendel)
        # have NO IDEA what the expected behavior is.
        # TODO(GH#32306): may be relevant to the expected behavior here.

        arr = pd.array([pd.NA, 0, 1], dtype=any_numeric_ea_dtype)
        if arr.dtype.kind in ["i", "u"]:
            max_val = np.iinfo(arr.dtype.numpy_dtype).max
        else:
            max_val = np.finfo(arr.dtype.numpy_dtype).max
        # set value s.t. (at least for integer dtypes) arr._values_for_argsort
        # is not an injection
        arr[2] = max_val

        left = pd.DataFrame(
            {
                "by_col1": arr,
                "by_col2": ["HELLO", "To", "You"],
                "on_col": [2, 4, 6],
                "value": ["a", "c", "e"],
            }
        )
        right = pd.DataFrame(
            {
                "by_col1": arr,
                "by_col2": ["WORLD", "Wide", "Web"],
                "on_col": [1, 2, 6],
                "value": ["b", "d", "f"],
            }
        )

        result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
        expected = pd.DataFrame(
            {
                "by_col1": arr,
                "by_col2": ["HELLO", "To", "You"],
                "on_col": [2, 4, 6],
                "value_x": ["a", "c", "e"],
            }
        )
        # by_col2 never matches across sides, so the right-hand value column
        # comes back all-NaN with object dtype
        expected["value_y"] = np.array([np.nan, np.nan, np.nan], dtype=object)
        tm.assert_frame_equal(result, expected)
1264
+
1265
+ def test_merge_by_col_tz_aware(self):
1266
+ # GH 21184
1267
+ left = pd.DataFrame(
1268
+ {
1269
+ "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
1270
+ "on_col": [2],
1271
+ "values": ["a"],
1272
+ }
1273
+ )
1274
+ right = pd.DataFrame(
1275
+ {
1276
+ "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
1277
+ "on_col": [1],
1278
+ "values": ["b"],
1279
+ }
1280
+ )
1281
+ result = merge_asof(left, right, by="by_col", on="on_col")
1282
+ expected = pd.DataFrame(
1283
+ [[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
1284
+ columns=["by_col", "on_col", "values_x", "values_y"],
1285
+ )
1286
+ tm.assert_frame_equal(result, expected)
1287
+
1288
+ def test_by_mixed_tz_aware(self):
1289
+ # GH 26649
1290
+ left = pd.DataFrame(
1291
+ {
1292
+ "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
1293
+ "by_col2": ["HELLO"],
1294
+ "on_col": [2],
1295
+ "value": ["a"],
1296
+ }
1297
+ )
1298
+ right = pd.DataFrame(
1299
+ {
1300
+ "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
1301
+ "by_col2": ["WORLD"],
1302
+ "on_col": [1],
1303
+ "value": ["b"],
1304
+ }
1305
+ )
1306
+ result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
1307
+ expected = pd.DataFrame(
1308
+ [[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
1309
+ columns=["by_col1", "by_col2", "on_col", "value_x"],
1310
+ )
1311
+ expected["value_y"] = np.array([np.nan], dtype=object)
1312
+ tm.assert_frame_equal(result, expected)
1313
+
1314
    def test_timedelta_tolerance_nearest(self, unit):
        # GH 27642: timedelta64 "on" keys with a Timedelta tolerance and
        # direction="nearest"
        if unit == "s":
            pytest.skip(
                "This test is invalid with unit='s' because that would "
                "round left['time']"
            )

        left = pd.DataFrame(
            list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
            columns=["time", "left"],
        )

        left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]")

        right = pd.DataFrame(
            list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
            columns=["time", "right"],
        )

        right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]")

        # only right times within 1ms of a left time match; 5, 20 and 25 ms
        # have no neighbor inside the tolerance
        expected = pd.DataFrame(
            list(
                zip(
                    [0, 5, 10, 15, 20, 25],
                    [0, 1, 2, 3, 4, 5],
                    [0, np.nan, 2, 4, np.nan, np.nan],
                )
            ),
            columns=["time", "left", "right"],
        )

        expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]")

        result = merge_asof(
            left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
        )

        tm.assert_frame_equal(result, expected)
1354
+
1355
+ # TODO: any_int_dtype; causes failures in _get_join_indexers
1356
+ def test_int_type_tolerance(self, any_int_numpy_dtype):
1357
+ # GH #28870
1358
+
1359
+ left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]})
1360
+ right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]})
1361
+ left["a"] = left["a"].astype(any_int_numpy_dtype)
1362
+ right["a"] = right["a"].astype(any_int_numpy_dtype)
1363
+
1364
+ expected = pd.DataFrame(
1365
+ {"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]}
1366
+ )
1367
+ expected["a"] = expected["a"].astype(any_int_numpy_dtype)
1368
+
1369
+ result = merge_asof(left, right, on="a", tolerance=10)
1370
+ tm.assert_frame_equal(result, expected)
1371
+
1372
    def test_merge_index_column_tz(self):
        # GH 29864: merging a tz-aware DatetimeIndex (left_index=True)
        # against a tz-aware column (right_on=...) should work
        index = pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC")
        left = pd.DataFrame([0.9, 0.8, 0.7, 0.6], columns=["xyz"], index=index[1:])
        right = pd.DataFrame({"from_date": index, "abc": [2.46] * 4 + [2.19]})
        result = merge_asof(
            left=left, right=right, left_index=True, right_on=["from_date"]
        )
        expected = pd.DataFrame(
            {
                "xyz": [0.9, 0.8, 0.7, 0.6],
                "from_date": index[1:],
                "abc": [2.46] * 3 + [2.19],
            },
            index=pd.date_range(
                "2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC"
            ),
        )
        tm.assert_frame_equal(result, expected)

        # the symmetric case: tz-aware index on the right, column on the left
        result = merge_asof(
            left=right, right=left, right_index=True, left_on=["from_date"]
        )
        expected = pd.DataFrame(
            {
                "from_date": index,
                "abc": [2.46] * 4 + [2.19],
                # the first timestamp predates left's index -> NaN
                "xyz": [np.nan, 0.9, 0.8, 0.7, 0.6],
            },
            index=Index([0, 1, 2, 3, 4]),
        )
        tm.assert_frame_equal(result, expected)
1404
+
1405
    def test_left_index_right_index_tolerance(self, unit):
        # https://github.com/pandas-dev/pandas/issues/35558
        # index-on-index merge with a sub-second Timedelta tolerance
        if unit == "s":
            pytest.skip(
                "This test is invalid with unit='s' because that would round dr1"
            )

        # left timestamps are offset by 0.4s, which is inside the 0.5s
        # tolerance of the matching daily right timestamps
        dr1 = pd.date_range(
            start="1/1/2020", end="1/20/2020", freq="2D", unit=unit
        ) + Timedelta(seconds=0.4).as_unit(unit)
        dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit)

        df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
        df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))

        expected = pd.DataFrame(
            {"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1)
        )
        result = merge_asof(
            df1,
            df2,
            left_index=True,
            right_index=True,
            tolerance=Timedelta(seconds=0.5),
        )
        tm.assert_frame_equal(result, expected)
1431
+
1432
+
1433
@pytest.mark.parametrize(
    "kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}]
)
@pytest.mark.parametrize(
    "data",
    [["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]],
)
def test_merge_asof_non_numerical_dtype(kwargs, data):
    # GH#29130: object-dtype keys (whether column or index) must raise
    frame = pd.DataFrame({"x": data}, index=data)
    err = r"Incompatible merge dtype, .*, both sides must have numeric dtype"
    with pytest.raises(MergeError, match=err):
        merge_asof(frame, frame.copy(), **kwargs)
1449
+
1450
+
1451
def test_merge_asof_non_numerical_dtype_object():
    # GH#29130: object-dtype left_on/right_on keys must raise, even when
    # combined with by-keys
    lhs = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]})
    rhs = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]})

    err = r"Incompatible merge dtype, .*, both sides must have numeric dtype"
    with pytest.raises(MergeError, match=err):
        merge_asof(
            lhs,
            rhs,
            left_on="left_val1",
            right_on="a",
            left_by="a",
            right_by="left_val",
        )
1467
+
1468
+
1469
@pytest.mark.parametrize(
    "kwargs",
    [
        {"right_index": True, "left_index": True},
        {"left_on": "left_time", "right_index": True},
        {"left_index": True, "right_on": "right"},
    ],
)
def test_merge_asof_index_behavior(kwargs):
    # GH 33463: every combination of index/column "on" keys yields the
    # same merged frame
    idx = Index([1, 5, 10], name="test")
    lhs = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=idx)
    rhs = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])

    expected = pd.DataFrame(
        {"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]},
        index=idx,
    )
    tm.assert_frame_equal(merge_asof(lhs, rhs, **kwargs), expected)
1489
+
1490
+
1491
def test_merge_asof_numeri_column_in_index():
    # GH#34488: the "on" key may live in the index rather than a column
    lhs = pd.DataFrame({"b": [10, 11, 12]}, index=Index([1, 2, 3], name="a"))
    rhs = pd.DataFrame({"c": [20, 21, 22]}, index=Index([0, 2, 3], name="a"))

    expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
    tm.assert_frame_equal(
        merge_asof(lhs, rhs, left_on="a", right_on="a"), expected
    )
1499
+
1500
+
1501
def test_merge_asof_numeri_column_in_multiindex():
    # GH#34488: the "on" key may be a single level of a MultiIndex
    lhs = pd.DataFrame(
        {"b": [10, 11, 12]},
        index=pd.MultiIndex.from_arrays([[1, 2, 3], ["a", "b", "c"]], names=["a", "z"]),
    )
    rhs = pd.DataFrame(
        {"c": [20, 21, 22]},
        index=pd.MultiIndex.from_arrays([[1, 2, 3], ["x", "y", "z"]], names=["a", "y"]),
    )

    expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
    tm.assert_frame_equal(
        merge_asof(lhs, rhs, left_on="a", right_on="a"), expected
    )
1515
+
1516
+
1517
def test_merge_asof_numeri_column_in_index_object_dtype():
    # GH#34488: an object-dtype index level used as the "on" key must raise
    err = r"Incompatible merge dtype, .*, both sides must have numeric dtype"

    lhs = pd.DataFrame({"b": [10, 11, 12]}, index=Index(["1", "2", "3"], name="a"))
    rhs = pd.DataFrame({"c": [20, 21, 22]}, index=Index(["m", "n", "o"], name="a"))

    with pytest.raises(MergeError, match=err):
        merge_asof(lhs, rhs, left_on="a", right_on="a")

    # same failure when "a" is one level of a MultiIndex
    lhs = lhs.reset_index().set_index(["a", "b"])
    rhs = rhs.reset_index().set_index(["a", "c"])

    with pytest.raises(MergeError, match=err):
        merge_asof(lhs, rhs, left_on="a", right_on="a")
1536
+
1537
+
1538
def test_merge_asof_array_as_on():
    # GH#42844: left_on/right_on may be an array-like, not only a label
    right = pd.DataFrame(
        {
            "a": [2, 6],
            "ts": [pd.Timestamp("2021/01/01 00:37"), pd.Timestamp("2021/01/01 01:40")],
        }
    )
    ts_merge = pd.date_range(
        start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h"
    )
    left = pd.DataFrame({"b": [4, 8, 7]})
    result = merge_asof(
        left,
        right,
        left_on=ts_merge,
        right_on="ts",
        allow_exact_matches=False,
        direction="backward",
    )
    # the array used as the key is appended as the "ts" result column
    expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge})
    tm.assert_frame_equal(result, expected)

    # array-like key on the right-hand side as well
    result = merge_asof(
        right,
        left,
        left_on="ts",
        right_on=ts_merge,
        allow_exact_matches=False,
        direction="backward",
    )
    expected = pd.DataFrame(
        {
            "a": [2, 6],
            "ts": [pd.Timestamp("2021/01/01 00:37"), pd.Timestamp("2021/01/01 01:40")],
            "b": [4, 8],
        }
    )
    tm.assert_frame_equal(result, expected)
1577
+
1578
+
1579
def test_merge_asof_raise_for_duplicate_columns():
    # GH#50102: duplicate column labels on either side must raise,
    # whichever way the key is specified
    lhs = pd.DataFrame([[1, 2, "a"]], columns=["a", "a", "left_val"])
    rhs = pd.DataFrame([[1, 1, 1]], columns=["a", "a", "right_val"])

    key_specs = (
        {"on": "a"},
        {"left_on": "a", "right_on": "right_val"},
        {"left_on": "left_val", "right_on": "a"},
    )
    for spec in key_specs:
        with pytest.raises(ValueError, match="column label 'a'"):
            merge_asof(lhs, rhs, **spec)