ZTWHHH commited on
Commit
217f9f9
·
verified ·
1 Parent(s): ff171e7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. videochat2/lib/python3.10/site-packages/pandas/_libs/interval.cpython-310-x86_64-linux-gnu.so +3 -0
  3. videochat2/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so +3 -0
  4. videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so +3 -0
  5. videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so +3 -0
  6. videochat2/lib/python3.10/site-packages/pandas/tests/__init__.py +0 -0
  7. videochat2/lib/python3.10/site-packages/pandas/tests/config/__init__.py +0 -0
  8. videochat2/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_localization.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/pandas/tests/config/test_config.py +462 -0
  12. videochat2/lib/python3.10/site-packages/pandas/tests/config/test_localization.py +142 -0
  13. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/__init__.py +0 -0
  14. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py +185 -0
  15. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py +244 -0
  16. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py +72 -0
  17. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py +341 -0
  18. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py +88 -0
  19. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py +312 -0
  20. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py +1079 -0
  21. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py +319 -0
  22. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py +1725 -0
  23. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py +364 -0
  24. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py +14 -0
  25. videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/util.py +30 -0
  26. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__init__.py +0 -0
  27. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/conftest.cpython-310.pyc +0 -0
  29. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc +0 -0
  30. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc +0 -0
  31. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_utils.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/conftest.py +12 -0
  33. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/test_impl.py +274 -0
  34. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/test_spec_conformance.py +164 -0
  35. videochat2/lib/python3.10/site-packages/pandas/tests/interchange/test_utils.py +40 -0
  36. videochat2/lib/python3.10/site-packages/pandas/tests/test_algos.py +2407 -0
  37. videochat2/lib/python3.10/site-packages/pandas/tests/test_downstream.py +269 -0
  38. videochat2/lib/python3.10/site-packages/pandas/tests/test_errors.py +112 -0
  39. videochat2/lib/python3.10/site-packages/pandas/tests/test_expressions.py +451 -0
  40. videochat2/lib/python3.10/site-packages/pandas/tests/test_flags.py +48 -0
  41. videochat2/lib/python3.10/site-packages/pandas/tests/test_multilevel.py +289 -0
  42. videochat2/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py +86 -0
  43. videochat2/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py +109 -0
  44. videochat2/lib/python3.10/site-packages/pandas/tests/test_take.py +306 -0
  45. videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__init__.py +0 -0
  46. videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/__init__.cpython-310.pyc +0 -0
  47. videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_api.cpython-310.pyc +0 -0
  48. videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_array_to_datetime.cpython-310.pyc +0 -0
  49. videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_ccalendar.cpython-310.pyc +0 -0
  50. videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_conversion.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1255,3 +1255,7 @@ videochat2/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_6
1255
  videochat2/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1256
  videochat2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text
1257
  videochat2/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
1255
  videochat2/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1256
  videochat2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text
1257
  videochat2/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1258
+ videochat2/lib/python3.10/site-packages/pandas/_libs/interval.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1259
+ videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1260
+ videochat2/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1261
+ videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/pandas/_libs/interval.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d8a25e7c1ad94e370341dd7b6e20f6a0e6aa22646b2e1a0eb8199224d49adb4
3
+ size 1375560
videochat2/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5565be514d9158667ac6e1d76f75d715d86807e647c44e78790c68e8fd65665
3
+ size 243816
videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c2443a7f82b82aa65c28241421272154bddce51ed3297f1489e7b2da1db3f29
3
+ size 341000
videochat2/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f5331d20eec68cff34f08edd193e0832bb1b6e1579c81bcdcb92a0e53984da4
3
+ size 434248
videochat2/lib/python3.10/site-packages/pandas/tests/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/config/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_localization.cpython-310.pyc ADDED
Binary file (3.54 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/config/test_config.py ADDED
@@ -0,0 +1,462 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import pytest
4
+
5
+ from pandas._config import config as cf
6
+ from pandas._config.config import OptionError
7
+
8
+ import pandas as pd
9
+
10
+
11
+ class TestConfig:
12
+ @pytest.fixture(autouse=True)
13
+ def clean_config(self, monkeypatch):
14
+ with monkeypatch.context() as m:
15
+ m.setattr(cf, "_global_config", {})
16
+ m.setattr(cf, "options", cf.DictWrapper(cf._global_config))
17
+ m.setattr(cf, "_deprecated_options", {})
18
+ m.setattr(cf, "_registered_options", {})
19
+
20
+ # Our test fixture in conftest.py sets "chained_assignment"
21
+ # to "raise" only after all test methods have been setup.
22
+ # However, after this setup, there is no longer any
23
+ # "chained_assignment" option, so re-register it.
24
+ cf.register_option("chained_assignment", "raise")
25
+ yield
26
+
27
+ def test_api(self):
28
+ # the pandas object exposes the user API
29
+ assert hasattr(pd, "get_option")
30
+ assert hasattr(pd, "set_option")
31
+ assert hasattr(pd, "reset_option")
32
+ assert hasattr(pd, "describe_option")
33
+
34
+ def test_is_one_of_factory(self):
35
+ v = cf.is_one_of_factory([None, 12])
36
+
37
+ v(12)
38
+ v(None)
39
+ msg = r"Value must be one of None\|12"
40
+ with pytest.raises(ValueError, match=msg):
41
+ v(1.1)
42
+
43
+ def test_register_option(self):
44
+ cf.register_option("a", 1, "doc")
45
+
46
+ # can't register an already registered option
47
+ msg = "Option 'a' has already been registered"
48
+ with pytest.raises(OptionError, match=msg):
49
+ cf.register_option("a", 1, "doc")
50
+
51
+ # can't register an already registered option
52
+ msg = "Path prefix to option 'a' is already an option"
53
+ with pytest.raises(OptionError, match=msg):
54
+ cf.register_option("a.b.c.d1", 1, "doc")
55
+ with pytest.raises(OptionError, match=msg):
56
+ cf.register_option("a.b.c.d2", 1, "doc")
57
+
58
+ # no python keywords
59
+ msg = "for is a python keyword"
60
+ with pytest.raises(ValueError, match=msg):
61
+ cf.register_option("for", 0)
62
+ with pytest.raises(ValueError, match=msg):
63
+ cf.register_option("a.for.b", 0)
64
+ # must be valid identifier (ensure attribute access works)
65
+ msg = "oh my goddess! is not a valid identifier"
66
+ with pytest.raises(ValueError, match=msg):
67
+ cf.register_option("Oh my Goddess!", 0)
68
+
69
+ # we can register options several levels deep
70
+ # without predefining the intermediate steps
71
+ # and we can define differently named options
72
+ # in the same namespace
73
+ cf.register_option("k.b.c.d1", 1, "doc")
74
+ cf.register_option("k.b.c.d2", 1, "doc")
75
+
76
+ def test_describe_option(self):
77
+ cf.register_option("a", 1, "doc")
78
+ cf.register_option("b", 1, "doc2")
79
+ cf.deprecate_option("b")
80
+
81
+ cf.register_option("c.d.e1", 1, "doc3")
82
+ cf.register_option("c.d.e2", 1, "doc4")
83
+ cf.register_option("f", 1)
84
+ cf.register_option("g.h", 1)
85
+ cf.register_option("k", 2)
86
+ cf.deprecate_option("g.h", rkey="k")
87
+ cf.register_option("l", "foo")
88
+
89
+ # non-existent keys raise KeyError
90
+ msg = r"No such keys\(s\)"
91
+ with pytest.raises(OptionError, match=msg):
92
+ cf.describe_option("no.such.key")
93
+
94
+ # we can get the description for any key we registered
95
+ assert "doc" in cf.describe_option("a", _print_desc=False)
96
+ assert "doc2" in cf.describe_option("b", _print_desc=False)
97
+ assert "precated" in cf.describe_option("b", _print_desc=False)
98
+ assert "doc3" in cf.describe_option("c.d.e1", _print_desc=False)
99
+ assert "doc4" in cf.describe_option("c.d.e2", _print_desc=False)
100
+
101
+ # if no doc is specified we get a default message
102
+ # saying "description not available"
103
+ assert "available" in cf.describe_option("f", _print_desc=False)
104
+ assert "available" in cf.describe_option("g.h", _print_desc=False)
105
+ assert "precated" in cf.describe_option("g.h", _print_desc=False)
106
+ assert "k" in cf.describe_option("g.h", _print_desc=False)
107
+
108
+ # default is reported
109
+ assert "foo" in cf.describe_option("l", _print_desc=False)
110
+ # current value is reported
111
+ assert "bar" not in cf.describe_option("l", _print_desc=False)
112
+ cf.set_option("l", "bar")
113
+ assert "bar" in cf.describe_option("l", _print_desc=False)
114
+
115
+ def test_case_insensitive(self):
116
+ cf.register_option("KanBAN", 1, "doc")
117
+
118
+ assert "doc" in cf.describe_option("kanbaN", _print_desc=False)
119
+ assert cf.get_option("kanBaN") == 1
120
+ cf.set_option("KanBan", 2)
121
+ assert cf.get_option("kAnBaN") == 2
122
+
123
+ # gets of non-existent keys fail
124
+ msg = r"No such keys\(s\): 'no_such_option'"
125
+ with pytest.raises(OptionError, match=msg):
126
+ cf.get_option("no_such_option")
127
+ cf.deprecate_option("KanBan")
128
+
129
+ assert cf._is_deprecated("kAnBaN")
130
+
131
+ def test_get_option(self):
132
+ cf.register_option("a", 1, "doc")
133
+ cf.register_option("b.c", "hullo", "doc2")
134
+ cf.register_option("b.b", None, "doc2")
135
+
136
+ # gets of existing keys succeed
137
+ assert cf.get_option("a") == 1
138
+ assert cf.get_option("b.c") == "hullo"
139
+ assert cf.get_option("b.b") is None
140
+
141
+ # gets of non-existent keys fail
142
+ msg = r"No such keys\(s\): 'no_such_option'"
143
+ with pytest.raises(OptionError, match=msg):
144
+ cf.get_option("no_such_option")
145
+
146
+ def test_set_option(self):
147
+ cf.register_option("a", 1, "doc")
148
+ cf.register_option("b.c", "hullo", "doc2")
149
+ cf.register_option("b.b", None, "doc2")
150
+
151
+ assert cf.get_option("a") == 1
152
+ assert cf.get_option("b.c") == "hullo"
153
+ assert cf.get_option("b.b") is None
154
+
155
+ cf.set_option("a", 2)
156
+ cf.set_option("b.c", "wurld")
157
+ cf.set_option("b.b", 1.1)
158
+
159
+ assert cf.get_option("a") == 2
160
+ assert cf.get_option("b.c") == "wurld"
161
+ assert cf.get_option("b.b") == 1.1
162
+
163
+ msg = r"No such keys\(s\): 'no.such.key'"
164
+ with pytest.raises(OptionError, match=msg):
165
+ cf.set_option("no.such.key", None)
166
+
167
+ def test_set_option_empty_args(self):
168
+ msg = "Must provide an even number of non-keyword arguments"
169
+ with pytest.raises(ValueError, match=msg):
170
+ cf.set_option()
171
+
172
+ def test_set_option_uneven_args(self):
173
+ msg = "Must provide an even number of non-keyword arguments"
174
+ with pytest.raises(ValueError, match=msg):
175
+ cf.set_option("a.b", 2, "b.c")
176
+
177
+ def test_set_option_invalid_single_argument_type(self):
178
+ msg = "Must provide an even number of non-keyword arguments"
179
+ with pytest.raises(ValueError, match=msg):
180
+ cf.set_option(2)
181
+
182
+ def test_set_option_multiple(self):
183
+ cf.register_option("a", 1, "doc")
184
+ cf.register_option("b.c", "hullo", "doc2")
185
+ cf.register_option("b.b", None, "doc2")
186
+
187
+ assert cf.get_option("a") == 1
188
+ assert cf.get_option("b.c") == "hullo"
189
+ assert cf.get_option("b.b") is None
190
+
191
+ cf.set_option("a", "2", "b.c", None, "b.b", 10.0)
192
+
193
+ assert cf.get_option("a") == "2"
194
+ assert cf.get_option("b.c") is None
195
+ assert cf.get_option("b.b") == 10.0
196
+
197
+ def test_validation(self):
198
+ cf.register_option("a", 1, "doc", validator=cf.is_int)
199
+ cf.register_option("d", 1, "doc", validator=cf.is_nonnegative_int)
200
+ cf.register_option("b.c", "hullo", "doc2", validator=cf.is_text)
201
+
202
+ msg = "Value must have type '<class 'int'>'"
203
+ with pytest.raises(ValueError, match=msg):
204
+ cf.register_option("a.b.c.d2", "NO", "doc", validator=cf.is_int)
205
+
206
+ cf.set_option("a", 2) # int is_int
207
+ cf.set_option("b.c", "wurld") # str is_str
208
+ cf.set_option("d", 2)
209
+ cf.set_option("d", None) # non-negative int can be None
210
+
211
+ # None not is_int
212
+ with pytest.raises(ValueError, match=msg):
213
+ cf.set_option("a", None)
214
+ with pytest.raises(ValueError, match=msg):
215
+ cf.set_option("a", "ab")
216
+
217
+ msg = "Value must be a nonnegative integer or None"
218
+ with pytest.raises(ValueError, match=msg):
219
+ cf.register_option("a.b.c.d3", "NO", "doc", validator=cf.is_nonnegative_int)
220
+ with pytest.raises(ValueError, match=msg):
221
+ cf.register_option("a.b.c.d3", -2, "doc", validator=cf.is_nonnegative_int)
222
+
223
+ msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>"
224
+ with pytest.raises(ValueError, match=msg):
225
+ cf.set_option("b.c", 1)
226
+
227
+ validator = cf.is_one_of_factory([None, cf.is_callable])
228
+ cf.register_option("b", lambda: None, "doc", validator=validator)
229
+ # pylint: disable-next=consider-using-f-string
230
+ cf.set_option("b", "%.1f".format) # Formatter is callable
231
+ cf.set_option("b", None) # Formatter is none (default)
232
+ with pytest.raises(ValueError, match="Value must be a callable"):
233
+ cf.set_option("b", "%.1f")
234
+
235
+ def test_reset_option(self):
236
+ cf.register_option("a", 1, "doc", validator=cf.is_int)
237
+ cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str)
238
+ assert cf.get_option("a") == 1
239
+ assert cf.get_option("b.c") == "hullo"
240
+
241
+ cf.set_option("a", 2)
242
+ cf.set_option("b.c", "wurld")
243
+ assert cf.get_option("a") == 2
244
+ assert cf.get_option("b.c") == "wurld"
245
+
246
+ cf.reset_option("a")
247
+ assert cf.get_option("a") == 1
248
+ assert cf.get_option("b.c") == "wurld"
249
+ cf.reset_option("b.c")
250
+ assert cf.get_option("a") == 1
251
+ assert cf.get_option("b.c") == "hullo"
252
+
253
+ def test_reset_option_all(self):
254
+ cf.register_option("a", 1, "doc", validator=cf.is_int)
255
+ cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str)
256
+ assert cf.get_option("a") == 1
257
+ assert cf.get_option("b.c") == "hullo"
258
+
259
+ cf.set_option("a", 2)
260
+ cf.set_option("b.c", "wurld")
261
+ assert cf.get_option("a") == 2
262
+ assert cf.get_option("b.c") == "wurld"
263
+
264
+ cf.reset_option("all")
265
+ assert cf.get_option("a") == 1
266
+ assert cf.get_option("b.c") == "hullo"
267
+
268
+ def test_deprecate_option(self):
269
+ # we can deprecate non-existent options
270
+ cf.deprecate_option("foo")
271
+
272
+ assert cf._is_deprecated("foo")
273
+ with warnings.catch_warnings(record=True) as w:
274
+ warnings.simplefilter("always")
275
+ with pytest.raises(KeyError, match="No such keys.s.: 'foo'"):
276
+ cf.get_option("foo")
277
+ assert len(w) == 1 # should have raised one warning
278
+ assert "deprecated" in str(w[-1]) # we get the default message
279
+
280
+ cf.register_option("a", 1, "doc", validator=cf.is_int)
281
+ cf.register_option("b.c", "hullo", "doc2")
282
+ cf.register_option("foo", "hullo", "doc2")
283
+
284
+ cf.deprecate_option("a", removal_ver="nifty_ver")
285
+ with warnings.catch_warnings(record=True) as w:
286
+ warnings.simplefilter("always")
287
+ cf.get_option("a")
288
+
289
+ assert len(w) == 1 # should have raised one warning
290
+ assert "eprecated" in str(w[-1]) # we get the default message
291
+ assert "nifty_ver" in str(w[-1]) # with the removal_ver quoted
292
+
293
+ msg = "Option 'a' has already been defined as deprecated"
294
+ with pytest.raises(OptionError, match=msg):
295
+ cf.deprecate_option("a")
296
+
297
+ cf.deprecate_option("b.c", "zounds!")
298
+ with warnings.catch_warnings(record=True) as w:
299
+ warnings.simplefilter("always")
300
+ cf.get_option("b.c")
301
+
302
+ assert len(w) == 1 # should have raised one warning
303
+ assert "zounds!" in str(w[-1]) # we get the custom message
304
+
305
+ # test rerouting keys
306
+ cf.register_option("d.a", "foo", "doc2")
307
+ cf.register_option("d.dep", "bar", "doc2")
308
+ assert cf.get_option("d.a") == "foo"
309
+ assert cf.get_option("d.dep") == "bar"
310
+
311
+ cf.deprecate_option("d.dep", rkey="d.a") # reroute d.dep to d.a
312
+ with warnings.catch_warnings(record=True) as w:
313
+ warnings.simplefilter("always")
314
+ assert cf.get_option("d.dep") == "foo"
315
+
316
+ assert len(w) == 1 # should have raised one warning
317
+ assert "eprecated" in str(w[-1]) # we get the custom message
318
+
319
+ with warnings.catch_warnings(record=True) as w:
320
+ warnings.simplefilter("always")
321
+ cf.set_option("d.dep", "baz") # should overwrite "d.a"
322
+
323
+ assert len(w) == 1 # should have raised one warning
324
+ assert "eprecated" in str(w[-1]) # we get the custom message
325
+
326
+ with warnings.catch_warnings(record=True) as w:
327
+ warnings.simplefilter("always")
328
+ assert cf.get_option("d.dep") == "baz"
329
+
330
+ assert len(w) == 1 # should have raised one warning
331
+ assert "eprecated" in str(w[-1]) # we get the custom message
332
+
333
+ def test_config_prefix(self):
334
+ with cf.config_prefix("base"):
335
+ cf.register_option("a", 1, "doc1")
336
+ cf.register_option("b", 2, "doc2")
337
+ assert cf.get_option("a") == 1
338
+ assert cf.get_option("b") == 2
339
+
340
+ cf.set_option("a", 3)
341
+ cf.set_option("b", 4)
342
+ assert cf.get_option("a") == 3
343
+ assert cf.get_option("b") == 4
344
+
345
+ assert cf.get_option("base.a") == 3
346
+ assert cf.get_option("base.b") == 4
347
+ assert "doc1" in cf.describe_option("base.a", _print_desc=False)
348
+ assert "doc2" in cf.describe_option("base.b", _print_desc=False)
349
+
350
+ cf.reset_option("base.a")
351
+ cf.reset_option("base.b")
352
+
353
+ with cf.config_prefix("base"):
354
+ assert cf.get_option("a") == 1
355
+ assert cf.get_option("b") == 2
356
+
357
+ def test_callback(self):
358
+ k = [None]
359
+ v = [None]
360
+
361
+ def callback(key):
362
+ k.append(key)
363
+ v.append(cf.get_option(key))
364
+
365
+ cf.register_option("d.a", "foo", cb=callback)
366
+ cf.register_option("d.b", "foo", cb=callback)
367
+
368
+ del k[-1], v[-1]
369
+ cf.set_option("d.a", "fooz")
370
+ assert k[-1] == "d.a"
371
+ assert v[-1] == "fooz"
372
+
373
+ del k[-1], v[-1]
374
+ cf.set_option("d.b", "boo")
375
+ assert k[-1] == "d.b"
376
+ assert v[-1] == "boo"
377
+
378
+ del k[-1], v[-1]
379
+ cf.reset_option("d.b")
380
+ assert k[-1] == "d.b"
381
+
382
+ def test_set_ContextManager(self):
383
+ def eq(val):
384
+ assert cf.get_option("a") == val
385
+
386
+ cf.register_option("a", 0)
387
+ eq(0)
388
+ with cf.option_context("a", 15):
389
+ eq(15)
390
+ with cf.option_context("a", 25):
391
+ eq(25)
392
+ eq(15)
393
+ eq(0)
394
+
395
+ cf.set_option("a", 17)
396
+ eq(17)
397
+
398
+ # Test that option_context can be used as a decorator too (#34253).
399
+ @cf.option_context("a", 123)
400
+ def f():
401
+ eq(123)
402
+
403
+ f()
404
+
405
+ def test_attribute_access(self):
406
+ holder = []
407
+
408
+ def f3(key):
409
+ holder.append(True)
410
+
411
+ cf.register_option("a", 0)
412
+ cf.register_option("c", 0, cb=f3)
413
+ options = cf.options
414
+
415
+ assert options.a == 0
416
+ with cf.option_context("a", 15):
417
+ assert options.a == 15
418
+
419
+ options.a = 500
420
+ assert cf.get_option("a") == 500
421
+
422
+ cf.reset_option("a")
423
+ assert options.a == cf.get_option("a", 0)
424
+
425
+ msg = "You can only set the value of existing options"
426
+ with pytest.raises(OptionError, match=msg):
427
+ options.b = 1
428
+ with pytest.raises(OptionError, match=msg):
429
+ options.display = 1
430
+
431
+ # make sure callback kicks when using this form of setting
432
+ options.c = 1
433
+ assert len(holder) == 1
434
+
435
+ def test_option_context_scope(self):
436
+ # Ensure that creating a context does not affect the existing
437
+ # environment as it is supposed to be used with the `with` statement.
438
+ # See https://github.com/pandas-dev/pandas/issues/8514
439
+
440
+ original_value = 60
441
+ context_value = 10
442
+ option_name = "a"
443
+
444
+ cf.register_option(option_name, original_value)
445
+
446
+ # Ensure creating contexts didn't affect the current context.
447
+ ctx = cf.option_context(option_name, context_value)
448
+ assert cf.get_option(option_name) == original_value
449
+
450
+ # Ensure the correct value is available inside the context.
451
+ with ctx:
452
+ assert cf.get_option(option_name) == context_value
453
+
454
+ # Ensure the current context is reset
455
+ assert cf.get_option(option_name) == original_value
456
+
457
+ def test_dictwrapper_getattr(self):
458
+ options = cf.options
459
+ # GH 19789
460
+ with pytest.raises(OptionError, match="No such option"):
461
+ options.bananas
462
+ assert not hasattr(options, "bananas")
videochat2/lib/python3.10/site-packages/pandas/tests/config/test_localization.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import codecs
2
+ import locale
3
+ import os
4
+
5
+ import pytest
6
+
7
+ from pandas._config.localization import (
8
+ can_set_locale,
9
+ get_locales,
10
+ set_locale,
11
+ )
12
+
13
+ import pandas as pd
14
+
15
+ _all_locales = get_locales()
16
+ _current_locale = locale.setlocale(locale.LC_ALL) # getlocale() is wrong, see GH#46595
17
+
18
+ # Don't run any of these tests if we have no locales.
19
+ pytestmark = pytest.mark.skipif(not _all_locales, reason="Need locales")
20
+
21
+ _skip_if_only_one_locale = pytest.mark.skipif(
22
+ len(_all_locales) <= 1, reason="Need multiple locales for meaningful test"
23
+ )
24
+
25
+
26
+ def _get_current_locale(lc_var: int = locale.LC_ALL) -> str:
27
+ # getlocale is not always compliant with setlocale, use setlocale. GH#46595
28
+ return locale.setlocale(lc_var)
29
+
30
+
31
+ @pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME))
32
+ def test_can_set_current_locale(lc_var):
33
+ # Can set the current locale
34
+ before_locale = _get_current_locale(lc_var)
35
+ assert can_set_locale(before_locale, lc_var=lc_var)
36
+ after_locale = _get_current_locale(lc_var)
37
+ assert before_locale == after_locale
38
+
39
+
40
+ @pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME))
41
+ def test_can_set_locale_valid_set(lc_var):
42
+ # Can set the default locale.
43
+ before_locale = _get_current_locale(lc_var)
44
+ assert can_set_locale("", lc_var=lc_var)
45
+ after_locale = _get_current_locale(lc_var)
46
+ assert before_locale == after_locale
47
+
48
+
49
+ @pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME))
50
+ def test_can_set_locale_invalid_set(lc_var):
51
+ # Cannot set an invalid locale.
52
+ before_locale = _get_current_locale(lc_var)
53
+ assert not can_set_locale("non-existent_locale", lc_var=lc_var)
54
+ after_locale = _get_current_locale(lc_var)
55
+ assert before_locale == after_locale
56
+
57
+
58
+ @pytest.mark.parametrize(
59
+ "lang,enc",
60
+ [
61
+ ("it_CH", "UTF-8"),
62
+ ("en_US", "ascii"),
63
+ ("zh_CN", "GB2312"),
64
+ ("it_IT", "ISO-8859-1"),
65
+ ],
66
+ )
67
+ @pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME))
68
+ def test_can_set_locale_no_leak(lang, enc, lc_var):
69
+ # Test that can_set_locale does not leak even when returning False. See GH#46595
70
+ before_locale = _get_current_locale(lc_var)
71
+ can_set_locale((lang, enc), locale.LC_ALL)
72
+ after_locale = _get_current_locale(lc_var)
73
+ assert before_locale == after_locale
74
+
75
+
76
+ def test_can_set_locale_invalid_get(monkeypatch):
77
+ # see GH#22129
78
+ # In some cases, an invalid locale can be set,
79
+ # but a subsequent getlocale() raises a ValueError.
80
+
81
+ def mock_get_locale():
82
+ raise ValueError()
83
+
84
+ with monkeypatch.context() as m:
85
+ m.setattr(locale, "getlocale", mock_get_locale)
86
+ assert not can_set_locale("")
87
+
88
+
89
+ def test_get_locales_at_least_one():
90
+ # see GH#9744
91
+ assert len(_all_locales) > 0
92
+
93
+
94
+ @_skip_if_only_one_locale
95
+ def test_get_locales_prefix():
96
+ first_locale = _all_locales[0]
97
+ assert len(get_locales(prefix=first_locale[:2])) > 0
98
+
99
+
100
+ @_skip_if_only_one_locale
101
+ @pytest.mark.parametrize(
102
+ "lang,enc",
103
+ [
104
+ ("it_CH", "UTF-8"),
105
+ ("en_US", "ascii"),
106
+ ("zh_CN", "GB2312"),
107
+ ("it_IT", "ISO-8859-1"),
108
+ ],
109
+ )
110
+ def test_set_locale(lang, enc):
111
+ before_locale = _get_current_locale()
112
+
113
+ enc = codecs.lookup(enc).name
114
+ new_locale = lang, enc
115
+
116
+ if not can_set_locale(new_locale):
117
+ msg = "unsupported locale setting"
118
+
119
+ with pytest.raises(locale.Error, match=msg):
120
+ with set_locale(new_locale):
121
+ pass
122
+ else:
123
+ with set_locale(new_locale) as normalized_locale:
124
+ new_lang, new_enc = normalized_locale.split(".")
125
+ new_enc = codecs.lookup(enc).name
126
+
127
+ normalized_locale = new_lang, new_enc
128
+ assert normalized_locale == new_locale
129
+
130
+ # Once we exit the "with" statement, locale should be back to what it was.
131
+ after_locale = _get_current_locale()
132
+ assert before_locale == after_locale
133
+
134
+
135
+ def test_encoding_detected():
136
+ system_locale = os.environ.get("LC_ALL")
137
+ system_encoding = system_locale.split(".")[-1] if system_locale else "utf-8"
138
+
139
+ assert (
140
+ codecs.lookup(pd.options.display.encoding).name
141
+ == codecs.lookup(system_encoding).name
142
+ )
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ DataFrame,
6
+ Series,
7
+ date_range,
8
+ )
9
+ import pandas._testing as tm
10
+ from pandas.tests.copy_view.util import get_array
11
+
12
+ # -----------------------------------------------------------------------------
13
+ # Copy/view behaviour for accessing underlying array of Series/DataFrame
14
+
15
+
16
+ @pytest.mark.parametrize(
17
+ "method",
18
+ [lambda ser: ser.values, lambda ser: np.asarray(ser)],
19
+ ids=["values", "asarray"],
20
+ )
21
+ def test_series_values(using_copy_on_write, method):
22
+ ser = Series([1, 2, 3], name="name")
23
+ ser_orig = ser.copy()
24
+
25
+ arr = method(ser)
26
+
27
+ if using_copy_on_write:
28
+ # .values still gives a view but is read-only
29
+ assert np.shares_memory(arr, get_array(ser, "name"))
30
+ assert arr.flags.writeable is False
31
+
32
+ # mutating series through arr therefore doesn't work
33
+ with pytest.raises(ValueError, match="read-only"):
34
+ arr[0] = 0
35
+ tm.assert_series_equal(ser, ser_orig)
36
+
37
+ # mutating the series itself still works
38
+ ser.iloc[0] = 0
39
+ assert ser.values[0] == 0
40
+ else:
41
+ assert arr.flags.writeable is True
42
+ arr[0] = 0
43
+ assert ser.iloc[0] == 0
44
+
45
+
46
+ @pytest.mark.parametrize(
47
+ "method",
48
+ [lambda df: df.values, lambda df: np.asarray(df)],
49
+ ids=["values", "asarray"],
50
+ )
51
+ def test_dataframe_values(using_copy_on_write, using_array_manager, method):
52
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
53
+ df_orig = df.copy()
54
+
55
+ arr = method(df)
56
+
57
+ if using_copy_on_write:
58
+ # .values still gives a view but is read-only
59
+ assert np.shares_memory(arr, get_array(df, "a"))
60
+ assert arr.flags.writeable is False
61
+
62
+ # mutating series through arr therefore doesn't work
63
+ with pytest.raises(ValueError, match="read-only"):
64
+ arr[0, 0] = 0
65
+ tm.assert_frame_equal(df, df_orig)
66
+
67
+ # mutating the series itself still works
68
+ df.iloc[0, 0] = 0
69
+ assert df.values[0, 0] == 0
70
+ else:
71
+ assert arr.flags.writeable is True
72
+ arr[0, 0] = 0
73
+ if not using_array_manager:
74
+ assert df.iloc[0, 0] == 0
75
+ else:
76
+ tm.assert_frame_equal(df, df_orig)
77
+
78
+
79
+ def test_series_to_numpy(using_copy_on_write):
80
+ ser = Series([1, 2, 3], name="name")
81
+ ser_orig = ser.copy()
82
+
83
+ # default: copy=False, no dtype or NAs
84
+ arr = ser.to_numpy()
85
+ if using_copy_on_write:
86
+ # to_numpy still gives a view but is read-only
87
+ assert np.shares_memory(arr, get_array(ser, "name"))
88
+ assert arr.flags.writeable is False
89
+
90
+ # mutating series through arr therefore doesn't work
91
+ with pytest.raises(ValueError, match="read-only"):
92
+ arr[0] = 0
93
+ tm.assert_series_equal(ser, ser_orig)
94
+
95
+ # mutating the series itself still works
96
+ ser.iloc[0] = 0
97
+ assert ser.values[0] == 0
98
+ else:
99
+ assert arr.flags.writeable is True
100
+ arr[0] = 0
101
+ assert ser.iloc[0] == 0
102
+
103
+ # specify copy=False gives a writeable array
104
+ ser = Series([1, 2, 3], name="name")
105
+ arr = ser.to_numpy(copy=True)
106
+ assert not np.shares_memory(arr, get_array(ser, "name"))
107
+ assert arr.flags.writeable is True
108
+
109
+ # specifying a dtype that already causes a copy also gives a writeable array
110
+ ser = Series([1, 2, 3], name="name")
111
+ arr = ser.to_numpy(dtype="float64")
112
+ assert not np.shares_memory(arr, get_array(ser, "name"))
113
+ assert arr.flags.writeable is True
114
+
115
+
116
+ @pytest.mark.parametrize("order", ["F", "C"])
117
+ def test_ravel_read_only(using_copy_on_write, order):
118
+ ser = Series([1, 2, 3])
119
+ arr = ser.ravel(order=order)
120
+ if using_copy_on_write:
121
+ assert arr.flags.writeable is False
122
+ assert np.shares_memory(get_array(ser), arr)
123
+
124
+
125
+ def test_series_array_ea_dtypes(using_copy_on_write):
126
+ ser = Series([1, 2, 3], dtype="Int64")
127
+ arr = np.asarray(ser, dtype="int64")
128
+ assert np.shares_memory(arr, get_array(ser))
129
+ if using_copy_on_write:
130
+ assert arr.flags.writeable is False
131
+ else:
132
+ assert arr.flags.writeable is True
133
+
134
+ arr = np.asarray(ser)
135
+ assert not np.shares_memory(arr, get_array(ser))
136
+ assert arr.flags.writeable is True
137
+
138
+
139
+ def test_dataframe_array_ea_dtypes(using_copy_on_write):
140
+ df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
141
+ arr = np.asarray(df, dtype="int64")
142
+ # TODO: This should be able to share memory, but we are roundtripping
143
+ # through object
144
+ assert not np.shares_memory(arr, get_array(df, "a"))
145
+ assert arr.flags.writeable is True
146
+
147
+ arr = np.asarray(df)
148
+ if using_copy_on_write:
149
+ # TODO(CoW): This should be True
150
+ assert arr.flags.writeable is False
151
+ else:
152
+ assert arr.flags.writeable is True
153
+
154
+
155
+ def test_dataframe_array_string_dtype(using_copy_on_write, using_array_manager):
156
+ df = DataFrame({"a": ["a", "b"]}, dtype="string")
157
+ arr = np.asarray(df)
158
+ if not using_array_manager:
159
+ assert np.shares_memory(arr, get_array(df, "a"))
160
+ if using_copy_on_write:
161
+ assert arr.flags.writeable is False
162
+ else:
163
+ assert arr.flags.writeable is True
164
+
165
+
166
+ def test_dataframe_multiple_numpy_dtypes():
167
+ df = DataFrame({"a": [1, 2, 3], "b": 1.5})
168
+ arr = np.asarray(df)
169
+ assert not np.shares_memory(arr, get_array(df, "a"))
170
+ assert arr.flags.writeable is True
171
+
172
+
173
+ def test_values_is_ea(using_copy_on_write):
174
+ df = DataFrame({"a": date_range("2012-01-01", periods=3)})
175
+ arr = np.asarray(df)
176
+ if using_copy_on_write:
177
+ assert arr.flags.writeable is False
178
+ else:
179
+ assert arr.flags.writeable is True
180
+
181
+
182
+ def test_empty_dataframe():
183
+ df = DataFrame()
184
+ arr = np.asarray(df)
185
+ assert arr.flags.writeable is True
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.compat import pa_version_under7p0
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ DataFrame,
10
+ Series,
11
+ Timestamp,
12
+ date_range,
13
+ )
14
+ import pandas._testing as tm
15
+ from pandas.tests.copy_view.util import get_array
16
+
17
+
18
+ def test_astype_single_dtype(using_copy_on_write):
19
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1.5})
20
+ df_orig = df.copy()
21
+ df2 = df.astype("float64")
22
+
23
+ if using_copy_on_write:
24
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
25
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
26
+ else:
27
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
28
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
29
+
30
+ # mutating df2 triggers a copy-on-write for that column/block
31
+ df2.iloc[0, 2] = 5.5
32
+ if using_copy_on_write:
33
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
34
+ tm.assert_frame_equal(df, df_orig)
35
+
36
+ # mutating parent also doesn't update result
37
+ df2 = df.astype("float64")
38
+ df.iloc[0, 2] = 5.5
39
+ tm.assert_frame_equal(df2, df_orig.astype("float64"))
40
+
41
+
42
+ @pytest.mark.parametrize("dtype", ["int64", "Int64"])
43
+ @pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"])
44
+ def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype):
45
+ if new_dtype == "int64[pyarrow]" and pa_version_under7p0:
46
+ pytest.skip("pyarrow not installed")
47
+ df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
48
+ df_orig = df.copy()
49
+ df2 = df.astype(new_dtype)
50
+
51
+ if using_copy_on_write:
52
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
53
+ else:
54
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
55
+
56
+ # mutating df2 triggers a copy-on-write for that column/block
57
+ df2.iloc[0, 0] = 10
58
+ if using_copy_on_write:
59
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
60
+ tm.assert_frame_equal(df, df_orig)
61
+
62
+ # mutating parent also doesn't update result
63
+ df2 = df.astype(new_dtype)
64
+ df.iloc[0, 0] = 100
65
+ tm.assert_frame_equal(df2, df_orig.astype(new_dtype))
66
+
67
+
68
+ @pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"])
69
+ def test_astype_different_target_dtype(using_copy_on_write, dtype):
70
+ if dtype == "int32[pyarrow]" and pa_version_under7p0:
71
+ pytest.skip("pyarrow not installed")
72
+ df = DataFrame({"a": [1, 2, 3]})
73
+ df_orig = df.copy()
74
+ df2 = df.astype(dtype)
75
+
76
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
77
+ if using_copy_on_write:
78
+ assert df2._mgr._has_no_reference(0)
79
+
80
+ df2.iloc[0, 0] = 5
81
+ tm.assert_frame_equal(df, df_orig)
82
+
83
+ # mutating parent also doesn't update result
84
+ df2 = df.astype(dtype)
85
+ df.iloc[0, 0] = 100
86
+ tm.assert_frame_equal(df2, df_orig.astype(dtype))
87
+
88
+
89
+ @td.skip_array_manager_invalid_test
90
+ def test_astype_numpy_to_ea():
91
+ ser = Series([1, 2, 3])
92
+ with pd.option_context("mode.copy_on_write", True):
93
+ result = ser.astype("Int64")
94
+ assert np.shares_memory(get_array(ser), get_array(result))
95
+
96
+
97
+ @pytest.mark.parametrize(
98
+ "dtype, new_dtype", [("object", "string"), ("string", "object")]
99
+ )
100
+ def test_astype_string_and_object(using_copy_on_write, dtype, new_dtype):
101
+ df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype)
102
+ df_orig = df.copy()
103
+ df2 = df.astype(new_dtype)
104
+
105
+ if using_copy_on_write:
106
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
107
+ else:
108
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
109
+
110
+ df2.iloc[0, 0] = "x"
111
+ tm.assert_frame_equal(df, df_orig)
112
+
113
+
114
+ @pytest.mark.parametrize(
115
+ "dtype, new_dtype", [("object", "string"), ("string", "object")]
116
+ )
117
+ def test_astype_string_and_object_update_original(
118
+ using_copy_on_write, dtype, new_dtype
119
+ ):
120
+ df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype)
121
+ df2 = df.astype(new_dtype)
122
+ df_orig = df2.copy()
123
+
124
+ if using_copy_on_write:
125
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
126
+ else:
127
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
128
+
129
+ df.iloc[0, 0] = "x"
130
+ tm.assert_frame_equal(df2, df_orig)
131
+
132
+
133
+ def test_astype_dict_dtypes(using_copy_on_write):
134
+ df = DataFrame(
135
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": Series([1.5, 1.5, 1.5], dtype="float64")}
136
+ )
137
+ df_orig = df.copy()
138
+ df2 = df.astype({"a": "float64", "c": "float64"})
139
+
140
+ if using_copy_on_write:
141
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
142
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
143
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
144
+ else:
145
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
146
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
147
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
148
+
149
+ # mutating df2 triggers a copy-on-write for that column/block
150
+ df2.iloc[0, 2] = 5.5
151
+ if using_copy_on_write:
152
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
153
+
154
+ df2.iloc[0, 1] = 10
155
+ if using_copy_on_write:
156
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
157
+ tm.assert_frame_equal(df, df_orig)
158
+
159
+
160
+ def test_astype_different_datetime_resos(using_copy_on_write):
161
+ df = DataFrame({"a": date_range("2019-12-31", periods=2, freq="D")})
162
+ result = df.astype("datetime64[ms]")
163
+
164
+ assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
165
+ if using_copy_on_write:
166
+ assert result._mgr._has_no_reference(0)
167
+
168
+
169
+ def test_astype_different_timezones(using_copy_on_write):
170
+ df = DataFrame(
171
+ {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")}
172
+ )
173
+ result = df.astype("datetime64[ns, Europe/Berlin]")
174
+ if using_copy_on_write:
175
+ assert not result._mgr._has_no_reference(0)
176
+ assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
177
+
178
+
179
+ def test_astype_different_timezones_different_reso(using_copy_on_write):
180
+ df = DataFrame(
181
+ {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")}
182
+ )
183
+ result = df.astype("datetime64[ms, Europe/Berlin]")
184
+ if using_copy_on_write:
185
+ assert result._mgr._has_no_reference(0)
186
+ assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
187
+
188
+
189
+ @pytest.mark.skipif(pa_version_under7p0, reason="pyarrow not installed")
190
+ def test_astype_arrow_timestamp(using_copy_on_write):
191
+ df = DataFrame(
192
+ {
193
+ "a": [
194
+ Timestamp("2020-01-01 01:01:01.000001"),
195
+ Timestamp("2020-01-01 01:01:01.000001"),
196
+ ]
197
+ },
198
+ dtype="M8[ns]",
199
+ )
200
+ result = df.astype("timestamp[ns][pyarrow]")
201
+ if using_copy_on_write:
202
+ assert not result._mgr._has_no_reference(0)
203
+ # TODO(CoW): arrow is not setting copy=False in the Series constructor
204
+ # under the hood
205
+ assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")._data)
206
+
207
+
208
+ def test_convert_dtypes_infer_objects(using_copy_on_write):
209
+ ser = Series(["a", "b", "c"])
210
+ ser_orig = ser.copy()
211
+ result = ser.convert_dtypes(
212
+ convert_integer=False,
213
+ convert_boolean=False,
214
+ convert_floating=False,
215
+ convert_string=False,
216
+ )
217
+
218
+ if using_copy_on_write:
219
+ assert np.shares_memory(get_array(ser), get_array(result))
220
+ else:
221
+ assert not np.shares_memory(get_array(ser), get_array(result))
222
+
223
+ result.iloc[0] = "x"
224
+ tm.assert_series_equal(ser, ser_orig)
225
+
226
+
227
+ def test_convert_dtypes(using_copy_on_write):
228
+ df = DataFrame({"a": ["a", "b"], "b": [1, 2], "c": [1.5, 2.5], "d": [True, False]})
229
+ df_orig = df.copy()
230
+ df2 = df.convert_dtypes()
231
+
232
+ if using_copy_on_write:
233
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
234
+ assert np.shares_memory(get_array(df2, "d"), get_array(df, "d"))
235
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
236
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
237
+ else:
238
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
239
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
240
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
241
+ assert not np.shares_memory(get_array(df2, "d"), get_array(df, "d"))
242
+
243
+ df2.iloc[0, 0] = "x"
244
+ tm.assert_frame_equal(df, df_orig)
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas import DataFrame
4
+ import pandas._testing as tm
5
+ from pandas.tests.copy_view.util import get_array
6
+
7
+
8
+ def test_clip_inplace_reference(using_copy_on_write):
9
+ df = DataFrame({"a": [1.5, 2, 3]})
10
+ df_copy = df.copy()
11
+ arr_a = get_array(df, "a")
12
+ view = df[:]
13
+ df.clip(lower=2, inplace=True)
14
+
15
+ # Clip not actually inplace right now but could be
16
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
17
+
18
+ if using_copy_on_write:
19
+ assert df._mgr._has_no_reference(0)
20
+ assert view._mgr._has_no_reference(0)
21
+ tm.assert_frame_equal(df_copy, view)
22
+
23
+
24
+ def test_clip_inplace_reference_no_op(using_copy_on_write):
25
+ df = DataFrame({"a": [1.5, 2, 3]})
26
+ df_copy = df.copy()
27
+ arr_a = get_array(df, "a")
28
+ view = df[:]
29
+ df.clip(lower=0, inplace=True)
30
+
31
+ if using_copy_on_write:
32
+ assert np.shares_memory(get_array(df, "a"), arr_a)
33
+ assert not df._mgr._has_no_reference(0)
34
+ assert not view._mgr._has_no_reference(0)
35
+ tm.assert_frame_equal(df_copy, view)
36
+ else:
37
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
38
+
39
+
40
+ def test_clip_inplace(using_copy_on_write):
41
+ df = DataFrame({"a": [1.5, 2, 3]})
42
+ arr_a = get_array(df, "a")
43
+ df.clip(lower=2, inplace=True)
44
+
45
+ # Clip not actually inplace right now but could be
46
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
47
+
48
+ if using_copy_on_write:
49
+ assert df._mgr._has_no_reference(0)
50
+
51
+
52
+ def test_clip(using_copy_on_write):
53
+ df = DataFrame({"a": [1.5, 2, 3]})
54
+ df_orig = df.copy()
55
+ df2 = df.clip(lower=2)
56
+
57
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
58
+
59
+ if using_copy_on_write:
60
+ assert df._mgr._has_no_reference(0)
61
+ tm.assert_frame_equal(df_orig, df)
62
+
63
+
64
+ def test_clip_no_op(using_copy_on_write):
65
+ df = DataFrame({"a": [1.5, 2, 3]})
66
+ df2 = df.clip(lower=0)
67
+
68
+ if using_copy_on_write:
69
+ assert not df._mgr._has_no_reference(0)
70
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
71
+ else:
72
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ DatetimeIndex,
8
+ Index,
9
+ Period,
10
+ PeriodIndex,
11
+ Series,
12
+ Timedelta,
13
+ TimedeltaIndex,
14
+ Timestamp,
15
+ )
16
+ import pandas._testing as tm
17
+ from pandas.tests.copy_view.util import get_array
18
+
19
+ # -----------------------------------------------------------------------------
20
+ # Copy/view behaviour for Series / DataFrame constructors
21
+
22
+
23
+ @pytest.mark.parametrize("dtype", [None, "int64"])
24
+ def test_series_from_series(dtype, using_copy_on_write):
25
+ # Case: constructing a Series from another Series object follows CoW rules:
26
+ # a new object is returned and thus mutations are not propagated
27
+ ser = Series([1, 2, 3], name="name")
28
+
29
+ # default is copy=False -> new Series is a shallow copy / view of original
30
+ result = Series(ser, dtype=dtype)
31
+
32
+ # the shallow copy still shares memory
33
+ assert np.shares_memory(get_array(ser), get_array(result))
34
+
35
+ if using_copy_on_write:
36
+ assert result._mgr.blocks[0].refs.has_reference()
37
+
38
+ if using_copy_on_write:
39
+ # mutating new series copy doesn't mutate original
40
+ result.iloc[0] = 0
41
+ assert ser.iloc[0] == 1
42
+ # mutating triggered a copy-on-write -> no longer shares memory
43
+ assert not np.shares_memory(get_array(ser), get_array(result))
44
+ else:
45
+ # mutating shallow copy does mutate original
46
+ result.iloc[0] = 0
47
+ assert ser.iloc[0] == 0
48
+ # and still shares memory
49
+ assert np.shares_memory(get_array(ser), get_array(result))
50
+
51
+ # the same when modifying the parent
52
+ result = Series(ser, dtype=dtype)
53
+
54
+ if using_copy_on_write:
55
+ # mutating original doesn't mutate new series
56
+ ser.iloc[0] = 0
57
+ assert result.iloc[0] == 1
58
+ else:
59
+ # mutating original does mutate shallow copy
60
+ ser.iloc[0] = 0
61
+ assert result.iloc[0] == 0
62
+
63
+
64
+ def test_series_from_series_with_reindex(using_copy_on_write):
65
+ # Case: constructing a Series from another Series with specifying an index
66
+ # that potentially requires a reindex of the values
67
+ ser = Series([1, 2, 3], name="name")
68
+
69
+ # passing an index that doesn't actually require a reindex of the values
70
+ # -> without CoW we get an actual mutating view
71
+ for index in [
72
+ ser.index,
73
+ ser.index.copy(),
74
+ list(ser.index),
75
+ ser.index.rename("idx"),
76
+ ]:
77
+ result = Series(ser, index=index)
78
+ assert np.shares_memory(ser.values, result.values)
79
+ result.iloc[0] = 0
80
+ if using_copy_on_write:
81
+ assert ser.iloc[0] == 1
82
+ else:
83
+ assert ser.iloc[0] == 0
84
+
85
+ # ensure that if an actual reindex is needed, we don't have any refs
86
+ # (mutating the result wouldn't trigger CoW)
87
+ result = Series(ser, index=[0, 1, 2, 3])
88
+ assert not np.shares_memory(ser.values, result.values)
89
+ if using_copy_on_write:
90
+ assert not result._mgr.blocks[0].refs.has_reference()
91
+
92
+
93
+ @pytest.mark.parametrize("fastpath", [False, True])
94
+ @pytest.mark.parametrize("dtype", [None, "int64"])
95
+ @pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)])
96
+ @pytest.mark.parametrize(
97
+ "arr", [np.array([1, 2, 3], dtype="int64"), pd.array([1, 2, 3], dtype="Int64")]
98
+ )
99
+ def test_series_from_array(using_copy_on_write, idx, dtype, fastpath, arr):
100
+ if idx is None or dtype is not None:
101
+ fastpath = False
102
+ ser = Series(arr, dtype=dtype, index=idx, fastpath=fastpath)
103
+ ser_orig = ser.copy()
104
+ data = getattr(arr, "_data", arr)
105
+ if using_copy_on_write:
106
+ assert not np.shares_memory(get_array(ser), data)
107
+ else:
108
+ assert np.shares_memory(get_array(ser), data)
109
+
110
+ arr[0] = 100
111
+ if using_copy_on_write:
112
+ tm.assert_series_equal(ser, ser_orig)
113
+ else:
114
+ expected = Series([100, 2, 3], dtype=dtype if dtype is not None else arr.dtype)
115
+ tm.assert_series_equal(ser, expected)
116
+
117
+
118
+ @pytest.mark.parametrize("copy", [True, False, None])
119
+ def test_series_from_array_different_dtype(using_copy_on_write, copy):
120
+ arr = np.array([1, 2, 3], dtype="int64")
121
+ ser = Series(arr, dtype="int32", copy=copy)
122
+ assert not np.shares_memory(get_array(ser), arr)
123
+
124
+
125
+ @pytest.mark.parametrize(
126
+ "idx",
127
+ [
128
+ Index([1, 2]),
129
+ DatetimeIndex([Timestamp("2019-12-31"), Timestamp("2020-12-31")]),
130
+ PeriodIndex([Period("2019-12-31"), Period("2020-12-31")]),
131
+ TimedeltaIndex([Timedelta("1 days"), Timedelta("2 days")]),
132
+ ],
133
+ )
134
+ def test_series_from_index(using_copy_on_write, idx):
135
+ ser = Series(idx)
136
+ expected = idx.copy(deep=True)
137
+ if using_copy_on_write:
138
+ assert np.shares_memory(get_array(ser), get_array(idx))
139
+ assert not ser._mgr._has_no_reference(0)
140
+ else:
141
+ assert not np.shares_memory(get_array(ser), get_array(idx))
142
+ ser.iloc[0] = ser.iloc[1]
143
+ tm.assert_index_equal(idx, expected)
144
+
145
+
146
+ def test_series_from_index_different_dtypes(using_copy_on_write):
147
+ idx = Index([1, 2, 3], dtype="int64")
148
+ ser = Series(idx, dtype="int32")
149
+ assert not np.shares_memory(get_array(ser), get_array(idx))
150
+ if using_copy_on_write:
151
+ assert ser._mgr._has_no_reference(0)
152
+
153
+
154
+ @pytest.mark.parametrize("fastpath", [False, True])
155
+ @pytest.mark.parametrize("dtype", [None, "int64"])
156
+ @pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)])
157
+ def test_series_from_block_manager(using_copy_on_write, idx, dtype, fastpath):
158
+ ser = Series([1, 2, 3], dtype="int64")
159
+ ser_orig = ser.copy()
160
+ ser2 = Series(ser._mgr, dtype=dtype, fastpath=fastpath, index=idx)
161
+ assert np.shares_memory(get_array(ser), get_array(ser2))
162
+ if using_copy_on_write:
163
+ assert not ser2._mgr._has_no_reference(0)
164
+
165
+ ser2.iloc[0] = 100
166
+ if using_copy_on_write:
167
+ tm.assert_series_equal(ser, ser_orig)
168
+ else:
169
+ expected = Series([100, 2, 3])
170
+ tm.assert_series_equal(ser, expected)
171
+
172
+
173
+ def test_series_from_block_manager_different_dtype(using_copy_on_write):
174
+ ser = Series([1, 2, 3], dtype="int64")
175
+ ser2 = Series(ser._mgr, dtype="int32")
176
+ assert not np.shares_memory(get_array(ser), get_array(ser2))
177
+ if using_copy_on_write:
178
+ assert ser2._mgr._has_no_reference(0)
179
+
180
+
181
+ @pytest.mark.parametrize("func", [lambda x: x, lambda x: x._mgr])
182
+ @pytest.mark.parametrize("columns", [None, ["a"]])
183
+ def test_dataframe_constructor_mgr_or_df(using_copy_on_write, columns, func):
184
+ df = DataFrame({"a": [1, 2, 3]})
185
+ df_orig = df.copy()
186
+
187
+ new_df = DataFrame(func(df))
188
+
189
+ assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
190
+ new_df.iloc[0] = 100
191
+
192
+ if using_copy_on_write:
193
+ assert not np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
194
+ tm.assert_frame_equal(df, df_orig)
195
+ else:
196
+ assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
197
+ tm.assert_frame_equal(df, new_df)
198
+
199
+
200
+ @pytest.mark.parametrize("dtype", [None, "int64", "Int64"])
201
+ @pytest.mark.parametrize("index", [None, [0, 1, 2]])
202
+ @pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]])
203
+ def test_dataframe_from_dict_of_series(
204
+ request, using_copy_on_write, columns, index, dtype
205
+ ):
206
+ # Case: constructing a DataFrame from Series objects with copy=False
207
+ # has to do a lazy following CoW rules
208
+ # (the default for DataFrame(dict) is still to copy to ensure consolidation)
209
+ s1 = Series([1, 2, 3])
210
+ s2 = Series([4, 5, 6])
211
+ s1_orig = s1.copy()
212
+ expected = DataFrame(
213
+ {"a": [1, 2, 3], "b": [4, 5, 6]}, index=index, columns=columns, dtype=dtype
214
+ )
215
+
216
+ result = DataFrame(
217
+ {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False
218
+ )
219
+
220
+ # the shallow copy still shares memory
221
+ assert np.shares_memory(get_array(result, "a"), get_array(s1))
222
+
223
+ # mutating the new dataframe doesn't mutate original
224
+ result.iloc[0, 0] = 10
225
+ if using_copy_on_write:
226
+ assert not np.shares_memory(get_array(result, "a"), get_array(s1))
227
+ tm.assert_series_equal(s1, s1_orig)
228
+ else:
229
+ assert s1.iloc[0] == 10
230
+
231
+ # the same when modifying the parent series
232
+ s1 = Series([1, 2, 3])
233
+ s2 = Series([4, 5, 6])
234
+ result = DataFrame(
235
+ {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False
236
+ )
237
+ s1.iloc[0] = 10
238
+ if using_copy_on_write:
239
+ assert not np.shares_memory(get_array(result, "a"), get_array(s1))
240
+ tm.assert_frame_equal(result, expected)
241
+ else:
242
+ assert result.iloc[0, 0] == 10
243
+
244
+
245
+ @pytest.mark.parametrize("dtype", [None, "int64"])
246
+ def test_dataframe_from_dict_of_series_with_reindex(dtype):
247
+ # Case: constructing a DataFrame from Series objects with copy=False
248
+ # and passing an index that requires an actual (no-view) reindex -> need
249
+ # to ensure the result doesn't have refs set up to unnecessarily trigger
250
+ # a copy on write
251
+ s1 = Series([1, 2, 3])
252
+ s2 = Series([4, 5, 6])
253
+ df = DataFrame({"a": s1, "b": s2}, index=[1, 2, 3], dtype=dtype, copy=False)
254
+
255
+ # df should own its memory, so mutating shouldn't trigger a copy
256
+ arr_before = get_array(df, "a")
257
+ assert not np.shares_memory(arr_before, get_array(s1))
258
+ df.iloc[0, 0] = 100
259
+ arr_after = get_array(df, "a")
260
+ assert np.shares_memory(arr_before, arr_after)
261
+
262
+
263
+ @pytest.mark.parametrize("cons", [Series, Index])
264
+ @pytest.mark.parametrize(
265
+ "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], None)]
266
+ )
267
+ def test_dataframe_from_series_or_index(using_copy_on_write, data, dtype, cons):
268
+ obj = cons(data, dtype=dtype)
269
+ obj_orig = obj.copy()
270
+ df = DataFrame(obj, dtype=dtype)
271
+ assert np.shares_memory(get_array(obj), get_array(df, 0))
272
+ if using_copy_on_write:
273
+ assert not df._mgr._has_no_reference(0)
274
+
275
+ df.iloc[0, 0] = data[-1]
276
+ if using_copy_on_write:
277
+ tm.assert_equal(obj, obj_orig)
278
+
279
+
280
+ @pytest.mark.parametrize("cons", [Series, Index])
281
+ def test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, cons):
282
+ obj = cons([1, 2], dtype="int64")
283
+ df = DataFrame(obj, dtype="int32")
284
+ assert not np.shares_memory(get_array(obj), get_array(df, 0))
285
+ if using_copy_on_write:
286
+ assert df._mgr._has_no_reference(0)
287
+
288
+
289
+ def test_dataframe_from_series_infer_datetime(using_copy_on_write):
290
+ ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object)
291
+ df = DataFrame(ser)
292
+ assert not np.shares_memory(get_array(ser), get_array(df, 0))
293
+ if using_copy_on_write:
294
+ assert df._mgr._has_no_reference(0)
295
+
296
+
297
+ @pytest.mark.parametrize("index", [None, [0, 1, 2]])
298
+ def test_dataframe_from_dict_of_series_with_dtype(index):
299
+ # Variant of above, but now passing a dtype that causes a copy
300
+ # -> need to ensure the result doesn't have refs set up to unnecessarily
301
+ # trigger a copy on write
302
+ s1 = Series([1.0, 2.0, 3.0])
303
+ s2 = Series([4, 5, 6])
304
+ df = DataFrame({"a": s1, "b": s2}, index=index, dtype="int64", copy=False)
305
+
306
+ # df should own its memory, so mutating shouldn't trigger a copy
307
+ arr_before = get_array(df, "a")
308
+ assert not np.shares_memory(arr_before, get_array(s1))
309
+ df.iloc[0, 0] = 100
310
+ arr_after = get_array(df, "a")
311
+ assert np.shares_memory(arr_before, arr_after)
312
+
313
+
314
+ @pytest.mark.parametrize("copy", [False, None, True])
315
+ def test_frame_from_numpy_array(using_copy_on_write, copy, using_array_manager):
316
+ arr = np.array([[1, 2], [3, 4]])
317
+ df = DataFrame(arr, copy=copy)
318
+
319
+ if (
320
+ using_copy_on_write
321
+ and copy is not False
322
+ or copy is True
323
+ or (using_array_manager and copy is None)
324
+ ):
325
+ assert not np.shares_memory(get_array(df, 0), arr)
326
+ else:
327
+ assert np.shares_memory(get_array(df, 0), arr)
328
+
329
+
330
+ def test_dataframe_from_records_with_dataframe(using_copy_on_write):
331
+ df = DataFrame({"a": [1, 2, 3]})
332
+ df_orig = df.copy()
333
+ df2 = DataFrame.from_records(df)
334
+ if using_copy_on_write:
335
+ assert not df._mgr._has_no_reference(0)
336
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
337
+ df2.iloc[0, 0] = 100
338
+ if using_copy_on_write:
339
+ tm.assert_frame_equal(df, df_orig)
340
+ else:
341
+ tm.assert_frame_equal(df, df2)
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import DataFrame
5
+ import pandas._testing as tm
6
+ from pandas.tests.copy_view.util import get_array
7
+
8
+
9
+ def test_assigning_to_same_variable_removes_references(using_copy_on_write):
10
+ df = DataFrame({"a": [1, 2, 3]})
11
+ df = df.reset_index()
12
+ if using_copy_on_write:
13
+ assert df._mgr._has_no_reference(1)
14
+ arr = get_array(df, "a")
15
+ df.iloc[0, 1] = 100 # Write into a
16
+
17
+ assert np.shares_memory(arr, get_array(df, "a"))
18
+
19
+
20
+ def test_setitem_dont_track_unnecessary_references(using_copy_on_write):
21
+ df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
22
+
23
+ df["b"] = 100
24
+ arr = get_array(df, "a")
25
+ # We split the block in setitem, if we are not careful the new blocks will
26
+ # reference each other triggering a copy
27
+ df.iloc[0, 0] = 100
28
+ assert np.shares_memory(arr, get_array(df, "a"))
29
+
30
+
31
+ def test_setitem_with_view_copies(using_copy_on_write):
32
+ df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
33
+ view = df[:]
34
+ expected = df.copy()
35
+
36
+ df["b"] = 100
37
+ arr = get_array(df, "a")
38
+ df.iloc[0, 0] = 100 # Check that we correctly track reference
39
+ if using_copy_on_write:
40
+ assert not np.shares_memory(arr, get_array(df, "a"))
41
+ tm.assert_frame_equal(view, expected)
42
+
43
+
44
+ def test_setitem_with_view_invalidated_does_not_copy(using_copy_on_write, request):
45
+ df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
46
+ view = df[:]
47
+
48
+ df["b"] = 100
49
+ arr = get_array(df, "a")
50
+ view = None # noqa
51
+ df.iloc[0, 0] = 100
52
+ if using_copy_on_write:
53
+ # Setitem split the block. Since the old block shared data with view
54
+ # all the new blocks are referencing view and each other. When view
55
+ # goes out of scope, they don't share data with any other block,
56
+ # so we should not trigger a copy
57
+ mark = pytest.mark.xfail(
58
+ reason="blk.delete does not track references correctly"
59
+ )
60
+ request.node.add_marker(mark)
61
+ assert np.shares_memory(arr, get_array(df, "a"))
62
+
63
+
64
+ def test_out_of_scope(using_copy_on_write):
65
+ def func():
66
+ df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1})
67
+ # create some subset
68
+ result = df[["a", "b"]]
69
+ return result
70
+
71
+ result = func()
72
+ if using_copy_on_write:
73
+ assert not result._mgr.blocks[0].refs.has_reference()
74
+ assert not result._mgr.blocks[1].refs.has_reference()
75
+
76
+
77
+ def test_delete(using_copy_on_write):
78
+ df = DataFrame(np.random.randn(4, 3), columns=["a", "b", "c"])
79
+ del df["b"]
80
+ if using_copy_on_write:
81
+ # TODO: This should not have references, delete makes a shallow copy
82
+ # but keeps the blocks alive
83
+ assert df._mgr.blocks[0].refs.has_reference()
84
+ assert df._mgr.blocks[1].refs.has_reference()
85
+
86
+ df = df[["a"]]
87
+ if using_copy_on_write:
88
+ assert not df._mgr.blocks[0].refs.has_reference()
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ DataFrame,
6
+ Series,
7
+ concat,
8
+ merge,
9
+ )
10
+ import pandas._testing as tm
11
+ from pandas.tests.copy_view.util import get_array
12
+
13
+
14
+ def test_concat_frames(using_copy_on_write):
15
+ df = DataFrame({"b": ["a"] * 3})
16
+ df2 = DataFrame({"a": ["a"] * 3})
17
+ df_orig = df.copy()
18
+ result = concat([df, df2], axis=1)
19
+
20
+ if using_copy_on_write:
21
+ assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))
22
+ assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
23
+ else:
24
+ assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
25
+ assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
26
+
27
+ result.iloc[0, 0] = "d"
28
+ if using_copy_on_write:
29
+ assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
30
+ assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
31
+
32
+ result.iloc[0, 1] = "d"
33
+ if using_copy_on_write:
34
+ assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
35
+ tm.assert_frame_equal(df, df_orig)
36
+
37
+
38
+ def test_concat_frames_updating_input(using_copy_on_write):
39
+ df = DataFrame({"b": ["a"] * 3})
40
+ df2 = DataFrame({"a": ["a"] * 3})
41
+ result = concat([df, df2], axis=1)
42
+
43
+ if using_copy_on_write:
44
+ assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))
45
+ assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
46
+ else:
47
+ assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
48
+ assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
49
+
50
+ expected = result.copy()
51
+ df.iloc[0, 0] = "d"
52
+ if using_copy_on_write:
53
+ assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
54
+ assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
55
+
56
+ df2.iloc[0, 0] = "d"
57
+ if using_copy_on_write:
58
+ assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
59
+ tm.assert_frame_equal(result, expected)
60
+
61
+
62
+ def test_concat_series(using_copy_on_write):
63
+ ser = Series([1, 2], name="a")
64
+ ser2 = Series([3, 4], name="b")
65
+ ser_orig = ser.copy()
66
+ ser2_orig = ser2.copy()
67
+ result = concat([ser, ser2], axis=1)
68
+
69
+ if using_copy_on_write:
70
+ assert np.shares_memory(get_array(result, "a"), ser.values)
71
+ assert np.shares_memory(get_array(result, "b"), ser2.values)
72
+ else:
73
+ assert not np.shares_memory(get_array(result, "a"), ser.values)
74
+ assert not np.shares_memory(get_array(result, "b"), ser2.values)
75
+
76
+ result.iloc[0, 0] = 100
77
+ if using_copy_on_write:
78
+ assert not np.shares_memory(get_array(result, "a"), ser.values)
79
+ assert np.shares_memory(get_array(result, "b"), ser2.values)
80
+
81
+ result.iloc[0, 1] = 1000
82
+ if using_copy_on_write:
83
+ assert not np.shares_memory(get_array(result, "b"), ser2.values)
84
+ tm.assert_series_equal(ser, ser_orig)
85
+ tm.assert_series_equal(ser2, ser2_orig)
86
+
87
+
88
+ def test_concat_frames_chained(using_copy_on_write):
89
+ df1 = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
90
+ df2 = DataFrame({"c": [4, 5, 6]})
91
+ df3 = DataFrame({"d": [4, 5, 6]})
92
+ result = concat([concat([df1, df2], axis=1), df3], axis=1)
93
+ expected = result.copy()
94
+
95
+ if using_copy_on_write:
96
+ assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
97
+ assert np.shares_memory(get_array(result, "c"), get_array(df2, "c"))
98
+ assert np.shares_memory(get_array(result, "d"), get_array(df3, "d"))
99
+ else:
100
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
101
+ assert not np.shares_memory(get_array(result, "c"), get_array(df2, "c"))
102
+ assert not np.shares_memory(get_array(result, "d"), get_array(df3, "d"))
103
+
104
+ df1.iloc[0, 0] = 100
105
+ if using_copy_on_write:
106
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
107
+
108
+ tm.assert_frame_equal(result, expected)
109
+
110
+
111
+ def test_concat_series_chained(using_copy_on_write):
112
+ ser1 = Series([1, 2, 3], name="a")
113
+ ser2 = Series([4, 5, 6], name="c")
114
+ ser3 = Series([4, 5, 6], name="d")
115
+ result = concat([concat([ser1, ser2], axis=1), ser3], axis=1)
116
+ expected = result.copy()
117
+
118
+ if using_copy_on_write:
119
+ assert np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))
120
+ assert np.shares_memory(get_array(result, "c"), get_array(ser2, "c"))
121
+ assert np.shares_memory(get_array(result, "d"), get_array(ser3, "d"))
122
+ else:
123
+ assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))
124
+ assert not np.shares_memory(get_array(result, "c"), get_array(ser2, "c"))
125
+ assert not np.shares_memory(get_array(result, "d"), get_array(ser3, "d"))
126
+
127
+ ser1.iloc[0] = 100
128
+ if using_copy_on_write:
129
+ assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))
130
+
131
+ tm.assert_frame_equal(result, expected)
132
+
133
+
134
+ def test_concat_series_updating_input(using_copy_on_write):
135
+ ser = Series([1, 2], name="a")
136
+ ser2 = Series([3, 4], name="b")
137
+ expected = DataFrame({"a": [1, 2], "b": [3, 4]})
138
+ result = concat([ser, ser2], axis=1)
139
+
140
+ if using_copy_on_write:
141
+ assert np.shares_memory(get_array(result, "a"), get_array(ser, "a"))
142
+ assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
143
+ else:
144
+ assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a"))
145
+ assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
146
+
147
+ ser.iloc[0] = 100
148
+ if using_copy_on_write:
149
+ assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a"))
150
+ assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
151
+ tm.assert_frame_equal(result, expected)
152
+
153
+ ser2.iloc[0] = 1000
154
+ if using_copy_on_write:
155
+ assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
156
+ tm.assert_frame_equal(result, expected)
157
+
158
+
159
+ def test_concat_mixed_series_frame(using_copy_on_write):
160
+ df = DataFrame({"a": [1, 2, 3], "c": 1})
161
+ ser = Series([4, 5, 6], name="d")
162
+ result = concat([df, ser], axis=1)
163
+ expected = result.copy()
164
+
165
+ if using_copy_on_write:
166
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
167
+ assert np.shares_memory(get_array(result, "c"), get_array(df, "c"))
168
+ assert np.shares_memory(get_array(result, "d"), get_array(ser, "d"))
169
+ else:
170
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
171
+ assert not np.shares_memory(get_array(result, "c"), get_array(df, "c"))
172
+ assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d"))
173
+
174
+ ser.iloc[0] = 100
175
+ if using_copy_on_write:
176
+ assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d"))
177
+
178
+ df.iloc[0, 0] = 100
179
+ if using_copy_on_write:
180
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
181
+ tm.assert_frame_equal(result, expected)
182
+
183
+
184
+ @pytest.mark.parametrize("copy", [True, None, False])
185
+ def test_concat_copy_keyword(using_copy_on_write, copy):
186
+ df = DataFrame({"a": [1, 2]})
187
+ df2 = DataFrame({"b": [1.5, 2.5]})
188
+
189
+ result = concat([df, df2], axis=1, copy=copy)
190
+
191
+ if using_copy_on_write or copy is False:
192
+ assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
193
+ assert np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
194
+ else:
195
+ assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
196
+ assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
197
+
198
+
199
+ @pytest.mark.parametrize(
200
+ "func",
201
+ [
202
+ lambda df1, df2, **kwargs: df1.merge(df2, **kwargs),
203
+ lambda df1, df2, **kwargs: merge(df1, df2, **kwargs),
204
+ ],
205
+ )
206
+ def test_merge_on_key(using_copy_on_write, func):
207
+ df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]})
208
+ df2 = DataFrame({"key": ["a", "b", "c"], "b": [4, 5, 6]})
209
+ df1_orig = df1.copy()
210
+ df2_orig = df2.copy()
211
+
212
+ result = func(df1, df2, on="key")
213
+
214
+ if using_copy_on_write:
215
+ assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
216
+ assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
217
+ assert np.shares_memory(get_array(result, "key"), get_array(df1, "key"))
218
+ assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key"))
219
+ else:
220
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
221
+ assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
222
+
223
+ result.iloc[0, 1] = 0
224
+ if using_copy_on_write:
225
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
226
+ assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
227
+
228
+ result.iloc[0, 2] = 0
229
+ if using_copy_on_write:
230
+ assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
231
+ tm.assert_frame_equal(df1, df1_orig)
232
+ tm.assert_frame_equal(df2, df2_orig)
233
+
234
+
235
+ def test_merge_on_index(using_copy_on_write):
236
+ df1 = DataFrame({"a": [1, 2, 3]})
237
+ df2 = DataFrame({"b": [4, 5, 6]})
238
+ df1_orig = df1.copy()
239
+ df2_orig = df2.copy()
240
+
241
+ result = merge(df1, df2, left_index=True, right_index=True)
242
+
243
+ if using_copy_on_write:
244
+ assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
245
+ assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
246
+ else:
247
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
248
+ assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
249
+
250
+ result.iloc[0, 0] = 0
251
+ if using_copy_on_write:
252
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
253
+ assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
254
+
255
+ result.iloc[0, 1] = 0
256
+ if using_copy_on_write:
257
+ assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
258
+ tm.assert_frame_equal(df1, df1_orig)
259
+ tm.assert_frame_equal(df2, df2_orig)
260
+
261
+
262
+ @pytest.mark.parametrize(
263
+ "func, how",
264
+ [
265
+ (lambda df1, df2, **kwargs: merge(df2, df1, on="key", **kwargs), "right"),
266
+ (lambda df1, df2, **kwargs: merge(df1, df2, on="key", **kwargs), "left"),
267
+ ],
268
+ )
269
+ def test_merge_on_key_enlarging_one(using_copy_on_write, func, how):
270
+ df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]})
271
+ df2 = DataFrame({"key": ["a", "b"], "b": [4, 5]})
272
+ df1_orig = df1.copy()
273
+ df2_orig = df2.copy()
274
+
275
+ result = func(df1, df2, how=how)
276
+
277
+ if using_copy_on_write:
278
+ assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
279
+ assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
280
+ assert df2._mgr._has_no_reference(1)
281
+ assert df2._mgr._has_no_reference(0)
282
+ assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) is (
283
+ how == "left"
284
+ )
285
+ assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key"))
286
+ else:
287
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
288
+ assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
289
+
290
+ if how == "left":
291
+ result.iloc[0, 1] = 0
292
+ else:
293
+ result.iloc[0, 2] = 0
294
+ if using_copy_on_write:
295
+ assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
296
+ tm.assert_frame_equal(df1, df1_orig)
297
+ tm.assert_frame_equal(df2, df2_orig)
298
+
299
+
300
+ @pytest.mark.parametrize("copy", [True, None, False])
301
+ def test_merge_copy_keyword(using_copy_on_write, copy):
302
+ df = DataFrame({"a": [1, 2]})
303
+ df2 = DataFrame({"b": [3, 4.5]})
304
+
305
+ result = df.merge(df2, copy=copy, left_index=True, right_index=True)
306
+
307
+ if using_copy_on_write or copy is False:
308
+ assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
309
+ assert np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
310
+ else:
311
+ assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
312
+ assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py ADDED
@@ -0,0 +1,1079 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.errors import SettingWithCopyWarning
5
+
6
+ from pandas.core.dtypes.common import is_float_dtype
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Series,
12
+ )
13
+ import pandas._testing as tm
14
+ from pandas.tests.copy_view.util import get_array
15
+
16
+
17
+ @pytest.fixture(params=["numpy", "nullable"])
18
+ def backend(request):
19
+ if request.param == "numpy":
20
+
21
+ def make_dataframe(*args, **kwargs):
22
+ return DataFrame(*args, **kwargs)
23
+
24
+ def make_series(*args, **kwargs):
25
+ return Series(*args, **kwargs)
26
+
27
+ elif request.param == "nullable":
28
+
29
+ def make_dataframe(*args, **kwargs):
30
+ df = DataFrame(*args, **kwargs)
31
+ df_nullable = df.convert_dtypes()
32
+ # convert_dtypes will try to cast float to int if there is no loss in
33
+ # precision -> undo that change
34
+ for col in df.columns:
35
+ if is_float_dtype(df[col].dtype) and not is_float_dtype(
36
+ df_nullable[col].dtype
37
+ ):
38
+ df_nullable[col] = df_nullable[col].astype("Float64")
39
+ # copy final result to ensure we start with a fully self-owning DataFrame
40
+ return df_nullable.copy()
41
+
42
+ def make_series(*args, **kwargs):
43
+ ser = Series(*args, **kwargs)
44
+ return ser.convert_dtypes().copy()
45
+
46
+ return request.param, make_dataframe, make_series
47
+
48
+
49
+ # -----------------------------------------------------------------------------
50
+ # Indexing operations taking subset + modifying the subset/parent
51
+
52
+
53
+ def test_subset_column_selection(backend, using_copy_on_write):
54
+ # Case: taking a subset of the columns of a DataFrame
55
+ # + afterwards modifying the subset
56
+ _, DataFrame, _ = backend
57
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
58
+ df_orig = df.copy()
59
+
60
+ subset = df[["a", "c"]]
61
+
62
+ if using_copy_on_write:
63
+ # the subset shares memory ...
64
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
65
+ # ... but uses CoW when being modified
66
+ subset.iloc[0, 0] = 0
67
+ else:
68
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
69
+ # INFO this no longer raise warning since pandas 1.4
70
+ # with pd.option_context("chained_assignment", "warn"):
71
+ # with tm.assert_produces_warning(SettingWithCopyWarning):
72
+ subset.iloc[0, 0] = 0
73
+
74
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
75
+
76
+ expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]})
77
+ tm.assert_frame_equal(subset, expected)
78
+ tm.assert_frame_equal(df, df_orig)
79
+
80
+
81
+ def test_subset_column_selection_modify_parent(backend, using_copy_on_write):
82
+ # Case: taking a subset of the columns of a DataFrame
83
+ # + afterwards modifying the parent
84
+ _, DataFrame, _ = backend
85
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
86
+
87
+ subset = df[["a", "c"]]
88
+
89
+ if using_copy_on_write:
90
+ # the subset shares memory ...
91
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
92
+ # ... but parent uses CoW parent when it is modified
93
+ df.iloc[0, 0] = 0
94
+
95
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
96
+ if using_copy_on_write:
97
+ # different column/block still shares memory
98
+ assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))
99
+
100
+ expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})
101
+ tm.assert_frame_equal(subset, expected)
102
+
103
+
104
+ def test_subset_row_slice(backend, using_copy_on_write):
105
+ # Case: taking a subset of the rows of a DataFrame using a slice
106
+ # + afterwards modifying the subset
107
+ _, DataFrame, _ = backend
108
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
109
+ df_orig = df.copy()
110
+
111
+ subset = df[1:3]
112
+ subset._mgr._verify_integrity()
113
+
114
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
115
+
116
+ if using_copy_on_write:
117
+ subset.iloc[0, 0] = 0
118
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
119
+
120
+ else:
121
+ # INFO this no longer raise warning since pandas 1.4
122
+ # with pd.option_context("chained_assignment", "warn"):
123
+ # with tm.assert_produces_warning(SettingWithCopyWarning):
124
+ subset.iloc[0, 0] = 0
125
+
126
+ subset._mgr._verify_integrity()
127
+
128
+ expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3))
129
+ tm.assert_frame_equal(subset, expected)
130
+ if using_copy_on_write:
131
+ # original parent dataframe is not modified (CoW)
132
+ tm.assert_frame_equal(df, df_orig)
133
+ else:
134
+ # original parent dataframe is actually updated
135
+ df_orig.iloc[1, 0] = 0
136
+ tm.assert_frame_equal(df, df_orig)
137
+
138
+
139
+ @pytest.mark.parametrize(
140
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
141
+ )
142
+ def test_subset_column_slice(backend, using_copy_on_write, using_array_manager, dtype):
143
+ # Case: taking a subset of the columns of a DataFrame using a slice
144
+ # + afterwards modifying the subset
145
+ dtype_backend, DataFrame, _ = backend
146
+ single_block = (
147
+ dtype == "int64" and dtype_backend == "numpy"
148
+ ) and not using_array_manager
149
+ df = DataFrame(
150
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
151
+ )
152
+ df_orig = df.copy()
153
+
154
+ subset = df.iloc[:, 1:]
155
+ subset._mgr._verify_integrity()
156
+
157
+ if using_copy_on_write:
158
+ assert np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
159
+
160
+ subset.iloc[0, 0] = 0
161
+ assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
162
+
163
+ else:
164
+ # we only get a warning in case of a single block
165
+ warn = SettingWithCopyWarning if single_block else None
166
+ with pd.option_context("chained_assignment", "warn"):
167
+ with tm.assert_produces_warning(warn):
168
+ subset.iloc[0, 0] = 0
169
+
170
+ expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)})
171
+ tm.assert_frame_equal(subset, expected)
172
+ # original parent dataframe is not modified (also not for BlockManager case,
173
+ # except for single block)
174
+ if not using_copy_on_write and (using_array_manager or single_block):
175
+ df_orig.iloc[0, 1] = 0
176
+ tm.assert_frame_equal(df, df_orig)
177
+ else:
178
+ tm.assert_frame_equal(df, df_orig)
179
+
180
+
181
+ @pytest.mark.parametrize(
182
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
183
+ )
184
+ @pytest.mark.parametrize(
185
+ "row_indexer",
186
+ [slice(1, 2), np.array([False, True, True]), np.array([1, 2])],
187
+ ids=["slice", "mask", "array"],
188
+ )
189
+ @pytest.mark.parametrize(
190
+ "column_indexer",
191
+ [slice("b", "c"), np.array([False, True, True]), ["b", "c"]],
192
+ ids=["slice", "mask", "array"],
193
+ )
194
+ def test_subset_loc_rows_columns(
195
+ backend,
196
+ dtype,
197
+ row_indexer,
198
+ column_indexer,
199
+ using_array_manager,
200
+ using_copy_on_write,
201
+ ):
202
+ # Case: taking a subset of the rows+columns of a DataFrame using .loc
203
+ # + afterwards modifying the subset
204
+ # Generic test for several combinations of row/column indexers, not all
205
+ # of those could actually return a view / need CoW (so this test is not
206
+ # checking memory sharing, only ensuring subsequent mutation doesn't
207
+ # affect the parent dataframe)
208
+ dtype_backend, DataFrame, _ = backend
209
+ df = DataFrame(
210
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
211
+ )
212
+ df_orig = df.copy()
213
+
214
+ subset = df.loc[row_indexer, column_indexer]
215
+
216
+ # modifying the subset never modifies the parent
217
+ subset.iloc[0, 0] = 0
218
+
219
+ expected = DataFrame(
220
+ {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
221
+ )
222
+ tm.assert_frame_equal(subset, expected)
223
+ # a few corner cases _do_ actually modify the parent (with both row and column
224
+ # slice, and in case of ArrayManager or BlockManager with single block)
225
+ if (
226
+ isinstance(row_indexer, slice)
227
+ and isinstance(column_indexer, slice)
228
+ and (
229
+ using_array_manager
230
+ or (
231
+ dtype == "int64"
232
+ and dtype_backend == "numpy"
233
+ and not using_copy_on_write
234
+ )
235
+ )
236
+ ):
237
+ df_orig.iloc[1, 1] = 0
238
+ tm.assert_frame_equal(df, df_orig)
239
+
240
+
241
+ @pytest.mark.parametrize(
242
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
243
+ )
244
+ @pytest.mark.parametrize(
245
+ "row_indexer",
246
+ [slice(1, 3), np.array([False, True, True]), np.array([1, 2])],
247
+ ids=["slice", "mask", "array"],
248
+ )
249
+ @pytest.mark.parametrize(
250
+ "column_indexer",
251
+ [slice(1, 3), np.array([False, True, True]), [1, 2]],
252
+ ids=["slice", "mask", "array"],
253
+ )
254
+ def test_subset_iloc_rows_columns(
255
+ backend,
256
+ dtype,
257
+ row_indexer,
258
+ column_indexer,
259
+ using_array_manager,
260
+ using_copy_on_write,
261
+ ):
262
+ # Case: taking a subset of the rows+columns of a DataFrame using .iloc
263
+ # + afterwards modifying the subset
264
+ # Generic test for several combinations of row/column indexers, not all
265
+ # of those could actually return a view / need CoW (so this test is not
266
+ # checking memory sharing, only ensuring subsequent mutation doesn't
267
+ # affect the parent dataframe)
268
+ dtype_backend, DataFrame, _ = backend
269
+ df = DataFrame(
270
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
271
+ )
272
+ df_orig = df.copy()
273
+
274
+ subset = df.iloc[row_indexer, column_indexer]
275
+
276
+ # modifying the subset never modifies the parent
277
+ subset.iloc[0, 0] = 0
278
+
279
+ expected = DataFrame(
280
+ {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
281
+ )
282
+ tm.assert_frame_equal(subset, expected)
283
+ # a few corner cases _do_ actually modify the parent (with both row and column
284
+ # slice, and in case of ArrayManager or BlockManager with single block)
285
+ if (
286
+ isinstance(row_indexer, slice)
287
+ and isinstance(column_indexer, slice)
288
+ and (
289
+ using_array_manager
290
+ or (
291
+ dtype == "int64"
292
+ and dtype_backend == "numpy"
293
+ and not using_copy_on_write
294
+ )
295
+ )
296
+ ):
297
+ df_orig.iloc[1, 1] = 0
298
+ tm.assert_frame_equal(df, df_orig)
299
+
300
+
301
+ @pytest.mark.parametrize(
302
+ "indexer",
303
+ [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
304
+ ids=["slice", "mask", "array"],
305
+ )
306
+ def test_subset_set_with_row_indexer(backend, indexer_si, indexer, using_copy_on_write):
307
+ # Case: setting values with a row indexer on a viewing subset
308
+ # subset[indexer] = value and subset.iloc[indexer] = value
309
+ _, DataFrame, _ = backend
310
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
311
+ df_orig = df.copy()
312
+ subset = df[1:4]
313
+
314
+ if (
315
+ indexer_si is tm.setitem
316
+ and isinstance(indexer, np.ndarray)
317
+ and indexer.dtype == "int"
318
+ ):
319
+ pytest.skip("setitem with labels selects on columns")
320
+
321
+ if using_copy_on_write:
322
+ indexer_si(subset)[indexer] = 0
323
+ else:
324
+ # INFO iloc no longer raises warning since pandas 1.4
325
+ warn = SettingWithCopyWarning if indexer_si is tm.setitem else None
326
+ with pd.option_context("chained_assignment", "warn"):
327
+ with tm.assert_produces_warning(warn):
328
+ indexer_si(subset)[indexer] = 0
329
+
330
+ expected = DataFrame(
331
+ {"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4)
332
+ )
333
+ tm.assert_frame_equal(subset, expected)
334
+ if using_copy_on_write:
335
+ # original parent dataframe is not modified (CoW)
336
+ tm.assert_frame_equal(df, df_orig)
337
+ else:
338
+ # original parent dataframe is actually updated
339
+ df_orig[1:3] = 0
340
+ tm.assert_frame_equal(df, df_orig)
341
+
342
+
343
+ def test_subset_set_with_mask(backend, using_copy_on_write):
344
+ # Case: setting values with a mask on a viewing subset: subset[mask] = value
345
+ _, DataFrame, _ = backend
346
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
347
+ df_orig = df.copy()
348
+ subset = df[1:4]
349
+
350
+ mask = subset > 3
351
+
352
+ if using_copy_on_write:
353
+ subset[mask] = 0
354
+ else:
355
+ with pd.option_context("chained_assignment", "warn"):
356
+ with tm.assert_produces_warning(SettingWithCopyWarning):
357
+ subset[mask] = 0
358
+
359
+ expected = DataFrame(
360
+ {"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4)
361
+ )
362
+ tm.assert_frame_equal(subset, expected)
363
+ if using_copy_on_write:
364
+ # original parent dataframe is not modified (CoW)
365
+ tm.assert_frame_equal(df, df_orig)
366
+ else:
367
+ # original parent dataframe is actually updated
368
+ df_orig.loc[3, "a"] = 0
369
+ df_orig.loc[1:3, "b"] = 0
370
+ tm.assert_frame_equal(df, df_orig)
371
+
372
+
373
+ def test_subset_set_column(backend, using_copy_on_write):
374
+ # Case: setting a single column on a viewing subset -> subset[col] = value
375
+ dtype_backend, DataFrame, _ = backend
376
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
377
+ df_orig = df.copy()
378
+ subset = df[1:3]
379
+
380
+ if dtype_backend == "numpy":
381
+ arr = np.array([10, 11], dtype="int64")
382
+ else:
383
+ arr = pd.array([10, 11], dtype="Int64")
384
+
385
+ if using_copy_on_write:
386
+ subset["a"] = arr
387
+ else:
388
+ with pd.option_context("chained_assignment", "warn"):
389
+ with tm.assert_produces_warning(SettingWithCopyWarning):
390
+ subset["a"] = arr
391
+
392
+ subset._mgr._verify_integrity()
393
+ expected = DataFrame(
394
+ {"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3)
395
+ )
396
+ tm.assert_frame_equal(subset, expected)
397
+ tm.assert_frame_equal(df, df_orig)
398
+
399
+
400
+ @pytest.mark.parametrize(
401
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
402
+ )
403
+ def test_subset_set_column_with_loc(
404
+ backend, using_copy_on_write, using_array_manager, dtype
405
+ ):
406
+ # Case: setting a single column with loc on a viewing subset
407
+ # -> subset.loc[:, col] = value
408
+ _, DataFrame, _ = backend
409
+ df = DataFrame(
410
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
411
+ )
412
+ df_orig = df.copy()
413
+ subset = df[1:3]
414
+
415
+ if using_copy_on_write:
416
+ subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
417
+ else:
418
+ with pd.option_context("chained_assignment", "warn"):
419
+ with tm.assert_produces_warning(
420
+ None,
421
+ raise_on_extra_warnings=not using_array_manager,
422
+ ):
423
+ subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
424
+
425
+ subset._mgr._verify_integrity()
426
+ expected = DataFrame(
427
+ {"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)},
428
+ index=range(1, 3),
429
+ )
430
+ tm.assert_frame_equal(subset, expected)
431
+ if using_copy_on_write:
432
+ # original parent dataframe is not modified (CoW)
433
+ tm.assert_frame_equal(df, df_orig)
434
+ else:
435
+ # original parent dataframe is actually updated
436
+ df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64")
437
+ tm.assert_frame_equal(df, df_orig)
438
+
439
+
440
+ def test_subset_set_column_with_loc2(backend, using_copy_on_write, using_array_manager):
441
+ # Case: setting a single column with loc on a viewing subset
442
+ # -> subset.loc[:, col] = value
443
+ # separate test for case of DataFrame of a single column -> takes a separate
444
+ # code path
445
+ _, DataFrame, _ = backend
446
+ df = DataFrame({"a": [1, 2, 3]})
447
+ df_orig = df.copy()
448
+ subset = df[1:3]
449
+
450
+ if using_copy_on_write:
451
+ subset.loc[:, "a"] = 0
452
+ else:
453
+ with pd.option_context("chained_assignment", "warn"):
454
+ with tm.assert_produces_warning(
455
+ None,
456
+ raise_on_extra_warnings=not using_array_manager,
457
+ ):
458
+ subset.loc[:, "a"] = 0
459
+
460
+ subset._mgr._verify_integrity()
461
+ expected = DataFrame({"a": [0, 0]}, index=range(1, 3))
462
+ tm.assert_frame_equal(subset, expected)
463
+ if using_copy_on_write:
464
+ # original parent dataframe is not modified (CoW)
465
+ tm.assert_frame_equal(df, df_orig)
466
+ else:
467
+ # original parent dataframe is actually updated
468
+ df_orig.loc[1:3, "a"] = 0
469
+ tm.assert_frame_equal(df, df_orig)
470
+
471
+
472
+ @pytest.mark.parametrize(
473
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
474
+ )
475
+ def test_subset_set_columns(backend, using_copy_on_write, dtype):
476
+ # Case: setting multiple columns on a viewing subset
477
+ # -> subset[[col1, col2]] = value
478
+ dtype_backend, DataFrame, _ = backend
479
+ df = DataFrame(
480
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
481
+ )
482
+ df_orig = df.copy()
483
+ subset = df[1:3]
484
+
485
+ if using_copy_on_write:
486
+ subset[["a", "c"]] = 0
487
+ else:
488
+ with pd.option_context("chained_assignment", "warn"):
489
+ with tm.assert_produces_warning(SettingWithCopyWarning):
490
+ subset[["a", "c"]] = 0
491
+
492
+ subset._mgr._verify_integrity()
493
+ if using_copy_on_write:
494
+ # first and third column should certainly have no references anymore
495
+ assert all(subset._mgr._has_no_reference(i) for i in [0, 2])
496
+ expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3))
497
+ if dtype_backend == "nullable":
498
+ # there is not yet a global option, so overriding a column by setting a scalar
499
+ # defaults to numpy dtype even if original column was nullable
500
+ expected["a"] = expected["a"].astype("int64")
501
+ expected["c"] = expected["c"].astype("int64")
502
+
503
+ tm.assert_frame_equal(subset, expected)
504
+ tm.assert_frame_equal(df, df_orig)
505
+
506
+
507
+ @pytest.mark.parametrize(
508
+ "indexer",
509
+ [slice("a", "b"), np.array([True, True, False]), ["a", "b"]],
510
+ ids=["slice", "mask", "array"],
511
+ )
512
+ def test_subset_set_with_column_indexer(backend, indexer, using_copy_on_write):
513
+ # Case: setting multiple columns with a column indexer on a viewing subset
514
+ # -> subset.loc[:, [col1, col2]] = value
515
+ _, DataFrame, _ = backend
516
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
517
+ df_orig = df.copy()
518
+ subset = df[1:3]
519
+
520
+ if using_copy_on_write:
521
+ subset.loc[:, indexer] = 0
522
+ else:
523
+ with pd.option_context("chained_assignment", "warn"):
524
+ # As of 2.0, this setitem attempts (successfully) to set values
525
+ # inplace, so the assignment is not chained.
526
+ subset.loc[:, indexer] = 0
527
+
528
+ subset._mgr._verify_integrity()
529
+ expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3))
530
+ tm.assert_frame_equal(subset, expected)
531
+ if using_copy_on_write:
532
+ tm.assert_frame_equal(df, df_orig)
533
+ else:
534
+ # pre-2.0, in the mixed case with BlockManager, only column "a"
535
+ # would be mutated in the parent frame. this changed with the
536
+ # enforcement of GH#45333
537
+ df_orig.loc[1:2, ["a", "b"]] = 0
538
+ tm.assert_frame_equal(df, df_orig)
539
+
540
+
541
+ @pytest.mark.parametrize(
542
+ "method",
543
+ [
544
+ lambda df: df[["a", "b"]][0:2],
545
+ lambda df: df[0:2][["a", "b"]],
546
+ lambda df: df[["a", "b"]].iloc[0:2],
547
+ lambda df: df[["a", "b"]].loc[0:1],
548
+ lambda df: df[0:2].iloc[:, 0:2],
549
+ lambda df: df[0:2].loc[:, "a":"b"], # type: ignore[misc]
550
+ ],
551
+ ids=[
552
+ "row-getitem-slice",
553
+ "column-getitem",
554
+ "row-iloc-slice",
555
+ "row-loc-slice",
556
+ "column-iloc-slice",
557
+ "column-loc-slice",
558
+ ],
559
+ )
560
+ @pytest.mark.parametrize(
561
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
562
+ )
563
+ def test_subset_chained_getitem(
564
+ request, backend, method, dtype, using_copy_on_write, using_array_manager
565
+ ):
566
+ # Case: creating a subset using multiple, chained getitem calls using views
567
+ # still needs to guarantee proper CoW behaviour
568
+ _, DataFrame, _ = backend
569
+ df = DataFrame(
570
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
571
+ )
572
+ df_orig = df.copy()
573
+
574
+ # when not using CoW, it depends on whether we have a single block or not
575
+ # and whether we are slicing the columns -> in that case we have a view
576
+ test_callspec = request.node.callspec.id
577
+ if not using_array_manager:
578
+ subset_is_view = test_callspec in (
579
+ "numpy-single-block-column-iloc-slice",
580
+ "numpy-single-block-column-loc-slice",
581
+ )
582
+ else:
583
+ # with ArrayManager, it doesn't matter whether we have
584
+ # single vs mixed block or numpy vs nullable dtypes
585
+ subset_is_view = test_callspec.endswith(
586
+ "column-iloc-slice"
587
+ ) or test_callspec.endswith("column-loc-slice")
588
+
589
+ # modify subset -> don't modify parent
590
+ subset = method(df)
591
+ subset.iloc[0, 0] = 0
592
+ if using_copy_on_write or (not subset_is_view):
593
+ tm.assert_frame_equal(df, df_orig)
594
+ else:
595
+ assert df.iloc[0, 0] == 0
596
+
597
+ # modify parent -> don't modify subset
598
+ subset = method(df)
599
+ df.iloc[0, 0] = 0
600
+ expected = DataFrame({"a": [1, 2], "b": [4, 5]})
601
+ if using_copy_on_write or not subset_is_view:
602
+ tm.assert_frame_equal(subset, expected)
603
+ else:
604
+ assert subset.iloc[0, 0] == 0
605
+
606
+
607
+ @pytest.mark.parametrize(
608
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
609
+ )
610
+ def test_subset_chained_getitem_column(backend, dtype, using_copy_on_write):
611
+ # Case: creating a subset using multiple, chained getitem calls using views
612
+ # still needs to guarantee proper CoW behaviour
613
+ _, DataFrame, Series = backend
614
+ df = DataFrame(
615
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
616
+ )
617
+ df_orig = df.copy()
618
+
619
+ # modify subset -> don't modify parent
620
+ subset = df[:]["a"][0:2]
621
+ df._clear_item_cache()
622
+ subset.iloc[0] = 0
623
+ if using_copy_on_write:
624
+ tm.assert_frame_equal(df, df_orig)
625
+ else:
626
+ assert df.iloc[0, 0] == 0
627
+
628
+ # modify parent -> don't modify subset
629
+ subset = df[:]["a"][0:2]
630
+ df._clear_item_cache()
631
+ df.iloc[0, 0] = 0
632
+ expected = Series([1, 2], name="a")
633
+ if using_copy_on_write:
634
+ tm.assert_series_equal(subset, expected)
635
+ else:
636
+ assert subset.iloc[0] == 0
637
+
638
+
639
+ @pytest.mark.parametrize(
640
+ "method",
641
+ [
642
+ lambda s: s["a":"c"]["a":"b"], # type: ignore[misc]
643
+ lambda s: s.iloc[0:3].iloc[0:2],
644
+ lambda s: s.loc["a":"c"].loc["a":"b"], # type: ignore[misc]
645
+ lambda s: s.loc["a":"c"] # type: ignore[misc]
646
+ .iloc[0:3]
647
+ .iloc[0:2]
648
+ .loc["a":"b"] # type: ignore[misc]
649
+ .iloc[0:1],
650
+ ],
651
+ ids=["getitem", "iloc", "loc", "long-chain"],
652
+ )
653
+ def test_subset_chained_getitem_series(backend, method, using_copy_on_write):
654
+ # Case: creating a subset using multiple, chained getitem calls using views
655
+ # still needs to guarantee proper CoW behaviour
656
+ _, _, Series = backend
657
+ s = Series([1, 2, 3], index=["a", "b", "c"])
658
+ s_orig = s.copy()
659
+
660
+ # modify subset -> don't modify parent
661
+ subset = method(s)
662
+ subset.iloc[0] = 0
663
+ if using_copy_on_write:
664
+ tm.assert_series_equal(s, s_orig)
665
+ else:
666
+ assert s.iloc[0] == 0
667
+
668
+ # modify parent -> don't modify subset
669
+ subset = s.iloc[0:3].iloc[0:2]
670
+ s.iloc[0] = 0
671
+ expected = Series([1, 2], index=["a", "b"])
672
+ if using_copy_on_write:
673
+ tm.assert_series_equal(subset, expected)
674
+ else:
675
+ assert subset.iloc[0] == 0
676
+
677
+
678
+ def test_subset_chained_single_block_row(using_copy_on_write, using_array_manager):
679
+ # not parametrizing this for dtype backend, since this explicitly tests single block
680
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
681
+ df_orig = df.copy()
682
+
683
+ # modify subset -> don't modify parent
684
+ subset = df[:].iloc[0].iloc[0:2]
685
+ subset.iloc[0] = 0
686
+ if using_copy_on_write or using_array_manager:
687
+ tm.assert_frame_equal(df, df_orig)
688
+ else:
689
+ assert df.iloc[0, 0] == 0
690
+
691
+ # modify parent -> don't modify subset
692
+ subset = df[:].iloc[0].iloc[0:2]
693
+ df.iloc[0, 0] = 0
694
+ expected = Series([1, 4], index=["a", "b"], name=0)
695
+ if using_copy_on_write or using_array_manager:
696
+ tm.assert_series_equal(subset, expected)
697
+ else:
698
+ assert subset.iloc[0] == 0
699
+
700
+
701
+ @pytest.mark.parametrize(
702
+ "method",
703
+ [
704
+ lambda df: df[:],
705
+ lambda df: df.loc[:, :],
706
+ lambda df: df.loc[:],
707
+ lambda df: df.iloc[:, :],
708
+ lambda df: df.iloc[:],
709
+ ],
710
+ ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"],
711
+ )
712
+ def test_null_slice(backend, method, using_copy_on_write):
713
+ # Case: also all variants of indexing with a null slice (:) should return
714
+ # new objects to ensure we correctly use CoW for the results
715
+ _, DataFrame, _ = backend
716
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
717
+ df_orig = df.copy()
718
+
719
+ df2 = method(df)
720
+
721
+ # we always return new objects (shallow copy), regardless of CoW or not
722
+ assert df2 is not df
723
+
724
+ # and those trigger CoW when mutated
725
+ df2.iloc[0, 0] = 0
726
+ if using_copy_on_write:
727
+ tm.assert_frame_equal(df, df_orig)
728
+ else:
729
+ assert df.iloc[0, 0] == 0
730
+
731
+
732
+ @pytest.mark.parametrize(
733
+ "method",
734
+ [
735
+ lambda s: s[:],
736
+ lambda s: s.loc[:],
737
+ lambda s: s.iloc[:],
738
+ ],
739
+ ids=["getitem", "loc", "iloc"],
740
+ )
741
+ def test_null_slice_series(backend, method, using_copy_on_write):
742
+ _, _, Series = backend
743
+ s = Series([1, 2, 3], index=["a", "b", "c"])
744
+ s_orig = s.copy()
745
+
746
+ s2 = method(s)
747
+
748
+ # we always return new objects, regardless of CoW or not
749
+ assert s2 is not s
750
+
751
+ # and those trigger CoW when mutated
752
+ s2.iloc[0] = 0
753
+ if using_copy_on_write:
754
+ tm.assert_series_equal(s, s_orig)
755
+ else:
756
+ assert s.iloc[0] == 0
757
+
758
+
759
+ # TODO add more tests modifying the parent
760
+
761
+
762
+ # -----------------------------------------------------------------------------
763
+ # Series -- Indexing operations taking subset + modifying the subset/parent
764
+
765
+
766
+ def test_series_getitem_slice(backend, using_copy_on_write):
767
+ # Case: taking a slice of a Series + afterwards modifying the subset
768
+ _, _, Series = backend
769
+ s = Series([1, 2, 3], index=["a", "b", "c"])
770
+ s_orig = s.copy()
771
+
772
+ subset = s[:]
773
+ assert np.shares_memory(get_array(subset), get_array(s))
774
+
775
+ subset.iloc[0] = 0
776
+
777
+ if using_copy_on_write:
778
+ assert not np.shares_memory(get_array(subset), get_array(s))
779
+
780
+ expected = Series([0, 2, 3], index=["a", "b", "c"])
781
+ tm.assert_series_equal(subset, expected)
782
+
783
+ if using_copy_on_write:
784
+ # original parent series is not modified (CoW)
785
+ tm.assert_series_equal(s, s_orig)
786
+ else:
787
+ # original parent series is actually updated
788
+ assert s.iloc[0] == 0
789
+
790
+
791
+ @pytest.mark.parametrize(
792
+ "indexer",
793
+ [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
794
+ ids=["slice", "mask", "array"],
795
+ )
796
+ def test_series_subset_set_with_indexer(
797
+ backend, indexer_si, indexer, using_copy_on_write
798
+ ):
799
+ # Case: setting values in a viewing Series with an indexer
800
+ _, _, Series = backend
801
+ s = Series([1, 2, 3], index=["a", "b", "c"])
802
+ s_orig = s.copy()
803
+ subset = s[:]
804
+
805
+ indexer_si(subset)[indexer] = 0
806
+ expected = Series([0, 0, 3], index=["a", "b", "c"])
807
+ tm.assert_series_equal(subset, expected)
808
+
809
+ if using_copy_on_write:
810
+ tm.assert_series_equal(s, s_orig)
811
+ else:
812
+ tm.assert_series_equal(s, expected)
813
+
814
+
815
+ # -----------------------------------------------------------------------------
816
+ # del operator
817
+
818
+
819
+ def test_del_frame(backend, using_copy_on_write):
820
+ # Case: deleting a column with `del` on a viewing child dataframe should
821
+ # not modify parent + update the references
822
+ _, DataFrame, _ = backend
823
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
824
+ df_orig = df.copy()
825
+ df2 = df[:]
826
+
827
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
828
+
829
+ del df2["b"]
830
+
831
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
832
+ tm.assert_frame_equal(df, df_orig)
833
+ tm.assert_frame_equal(df2, df_orig[["a", "c"]])
834
+ df2._mgr._verify_integrity()
835
+
836
+ # TODO in theory modifying column "b" of the parent wouldn't need a CoW
837
+ # but the weakref is still alive and so we still perform CoW
838
+
839
+ df2.loc[0, "a"] = 100
840
+ if using_copy_on_write:
841
+ # modifying child after deleting a column still doesn't update parent
842
+ tm.assert_frame_equal(df, df_orig)
843
+ else:
844
+ assert df.loc[0, "a"] == 100
845
+
846
+
847
+ def test_del_series(backend):
848
+ _, _, Series = backend
849
+ s = Series([1, 2, 3], index=["a", "b", "c"])
850
+ s_orig = s.copy()
851
+ s2 = s[:]
852
+
853
+ assert np.shares_memory(get_array(s), get_array(s2))
854
+
855
+ del s2["a"]
856
+
857
+ assert not np.shares_memory(get_array(s), get_array(s2))
858
+ tm.assert_series_equal(s, s_orig)
859
+ tm.assert_series_equal(s2, s_orig[["b", "c"]])
860
+
861
+ # modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array)
862
+ values = s2.values
863
+ s2.loc["b"] = 100
864
+ assert values[0] == 100
865
+
866
+
867
+ # -----------------------------------------------------------------------------
868
+ # Accessing column as Series
869
+
870
+
871
+ def test_column_as_series(backend, using_copy_on_write, using_array_manager):
872
+ # Case: selecting a single column now also uses Copy-on-Write
873
+ dtype_backend, DataFrame, Series = backend
874
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
875
+ df_orig = df.copy()
876
+
877
+ s = df["a"]
878
+
879
+ assert np.shares_memory(get_array(s, "a"), get_array(df, "a"))
880
+
881
+ if using_copy_on_write or using_array_manager:
882
+ s[0] = 0
883
+ else:
884
+ warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
885
+ with pd.option_context("chained_assignment", "warn"):
886
+ with tm.assert_produces_warning(warn):
887
+ s[0] = 0
888
+
889
+ expected = Series([0, 2, 3], name="a")
890
+ tm.assert_series_equal(s, expected)
891
+ if using_copy_on_write:
892
+ # assert not np.shares_memory(s.values, get_array(df, "a"))
893
+ tm.assert_frame_equal(df, df_orig)
894
+ # ensure cached series on getitem is not the changed series
895
+ tm.assert_series_equal(df["a"], df_orig["a"])
896
+ else:
897
+ df_orig.iloc[0, 0] = 0
898
+ tm.assert_frame_equal(df, df_orig)
899
+
900
+
901
+ def test_column_as_series_set_with_upcast(
902
+ backend, using_copy_on_write, using_array_manager
903
+ ):
904
+ # Case: selecting a single column now also uses Copy-on-Write -> when
905
+ # setting a value causes an upcast, we don't need to update the parent
906
+ # DataFrame through the cache mechanism
907
+ dtype_backend, DataFrame, Series = backend
908
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
909
+ df_orig = df.copy()
910
+
911
+ s = df["a"]
912
+ if dtype_backend == "nullable":
913
+ with pytest.raises(TypeError, match="Invalid value"):
914
+ s[0] = "foo"
915
+ expected = Series([1, 2, 3], name="a")
916
+ elif using_copy_on_write or using_array_manager:
917
+ s[0] = "foo"
918
+ expected = Series(["foo", 2, 3], dtype=object, name="a")
919
+ else:
920
+ with pd.option_context("chained_assignment", "warn"):
921
+ with tm.assert_produces_warning(SettingWithCopyWarning):
922
+ s[0] = "foo"
923
+ expected = Series(["foo", 2, 3], dtype=object, name="a")
924
+
925
+ tm.assert_series_equal(s, expected)
926
+ if using_copy_on_write:
927
+ tm.assert_frame_equal(df, df_orig)
928
+ # ensure cached series on getitem is not the changed series
929
+ tm.assert_series_equal(df["a"], df_orig["a"])
930
+ else:
931
+ df_orig["a"] = expected
932
+ tm.assert_frame_equal(df, df_orig)
933
+
934
+
935
+ @pytest.mark.parametrize(
936
+ "method",
937
+ [
938
+ lambda df: df["a"],
939
+ lambda df: df.loc[:, "a"],
940
+ lambda df: df.iloc[:, 0],
941
+ ],
942
+ ids=["getitem", "loc", "iloc"],
943
+ )
944
+ def test_column_as_series_no_item_cache(
945
+ request, backend, method, using_copy_on_write, using_array_manager
946
+ ):
947
+ # Case: selecting a single column (which now also uses Copy-on-Write to protect
948
+ # the view) should always give a new object (i.e. not make use of a cache)
949
+ dtype_backend, DataFrame, _ = backend
950
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
951
+ df_orig = df.copy()
952
+
953
+ s1 = method(df)
954
+ s2 = method(df)
955
+
956
+ is_iloc = "iloc" in request.node.name
957
+ if using_copy_on_write or is_iloc:
958
+ assert s1 is not s2
959
+ else:
960
+ assert s1 is s2
961
+
962
+ if using_copy_on_write or using_array_manager:
963
+ s1.iloc[0] = 0
964
+ else:
965
+ warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
966
+ with pd.option_context("chained_assignment", "warn"):
967
+ with tm.assert_produces_warning(warn):
968
+ s1.iloc[0] = 0
969
+
970
+ if using_copy_on_write:
971
+ tm.assert_series_equal(s2, df_orig["a"])
972
+ tm.assert_frame_equal(df, df_orig)
973
+ else:
974
+ assert s2.iloc[0] == 0
975
+
976
+
977
+ # TODO add tests for other indexing methods on the Series
978
+
979
+
980
+ def test_dataframe_add_column_from_series(backend):
981
+ # Case: adding a new column to a DataFrame from an existing column/series
982
+ # -> always already takes a copy on assignment
983
+ # (no change in behaviour here)
984
+ # TODO can we achieve the same behaviour with Copy-on-Write?
985
+ _, DataFrame, Series = backend
986
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
987
+
988
+ s = Series([10, 11, 12])
989
+ df["new"] = s
990
+ assert not np.shares_memory(get_array(df, "new"), s.values)
991
+
992
+ # editing series -> doesn't modify column in frame
993
+ s[0] = 0
994
+ expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]})
995
+ tm.assert_frame_equal(df, expected)
996
+
997
+ # editing column in frame -> doesn't modify series
998
+ df.loc[2, "new"] = 100
999
+ expected_s = Series([0, 11, 12])
1000
+ tm.assert_series_equal(s, expected_s)
1001
+
1002
+
1003
+ @pytest.mark.parametrize("val", [100, "a"])
1004
+ @pytest.mark.parametrize(
1005
+ "indexer_func, indexer",
1006
+ [
1007
+ (tm.loc, (0, "a")),
1008
+ (tm.iloc, (0, 0)),
1009
+ (tm.loc, ([0], "a")),
1010
+ (tm.iloc, ([0], 0)),
1011
+ (tm.loc, (slice(None), "a")),
1012
+ (tm.iloc, (slice(None), 0)),
1013
+ ],
1014
+ )
1015
+ def test_set_value_copy_only_necessary_column(
1016
+ using_copy_on_write, indexer_func, indexer, val
1017
+ ):
1018
+ # When setting inplace, only copy column that is modified instead of the whole
1019
+ # block (by splitting the block)
1020
+ # TODO multi-block only for now
1021
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
1022
+ df_orig = df.copy()
1023
+ view = df[:]
1024
+
1025
+ indexer_func(df)[indexer] = val
1026
+
1027
+ if using_copy_on_write:
1028
+ assert np.shares_memory(get_array(df, "b"), get_array(view, "b"))
1029
+ assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))
1030
+ tm.assert_frame_equal(view, df_orig)
1031
+ else:
1032
+ assert np.shares_memory(get_array(df, "c"), get_array(view, "c"))
1033
+ if val == "a":
1034
+ assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))
1035
+ else:
1036
+ assert np.shares_memory(get_array(df, "a"), get_array(view, "a"))
1037
+
1038
+
1039
+ def test_series_midx_slice(using_copy_on_write):
1040
+ ser = Series([1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]]))
1041
+ result = ser[1]
1042
+ assert np.shares_memory(get_array(ser), get_array(result))
1043
+ result.iloc[0] = 100
1044
+ if using_copy_on_write:
1045
+ expected = Series(
1046
+ [1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])
1047
+ )
1048
+ tm.assert_series_equal(ser, expected)
1049
+
1050
+
1051
+ def test_getitem_midx_slice(using_copy_on_write, using_array_manager):
1052
+ df = DataFrame({("a", "x"): [1, 2], ("a", "y"): 1, ("b", "x"): 2})
1053
+ df_orig = df.copy()
1054
+ new_df = df[("a",)]
1055
+
1056
+ if using_copy_on_write:
1057
+ assert not new_df._mgr._has_no_reference(0)
1058
+
1059
+ if not using_array_manager:
1060
+ assert np.shares_memory(get_array(df, ("a", "x")), get_array(new_df, "x"))
1061
+ if using_copy_on_write:
1062
+ new_df.iloc[0, 0] = 100
1063
+ tm.assert_frame_equal(df_orig, df)
1064
+
1065
+
1066
+ def test_series_midx_tuples_slice(using_copy_on_write):
1067
+ ser = Series(
1068
+ [1, 2, 3],
1069
+ index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),
1070
+ )
1071
+ result = ser[(1, 2)]
1072
+ assert np.shares_memory(get_array(ser), get_array(result))
1073
+ result.iloc[0] = 100
1074
+ if using_copy_on_write:
1075
+ expected = Series(
1076
+ [1, 2, 3],
1077
+ index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),
1078
+ )
1079
+ tm.assert_series_equal(ser, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ NA,
6
+ DataFrame,
7
+ Interval,
8
+ NaT,
9
+ Series,
10
+ Timestamp,
11
+ interval_range,
12
+ )
13
+ import pandas._testing as tm
14
+ from pandas.tests.copy_view.util import get_array
15
+
16
+
17
+ @pytest.mark.parametrize("method", ["pad", "nearest", "linear"])
18
+ def test_interpolate_no_op(using_copy_on_write, method):
19
+ df = DataFrame({"a": [1, 2]})
20
+ df_orig = df.copy()
21
+
22
+ result = df.interpolate(method=method)
23
+
24
+ if using_copy_on_write:
25
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
26
+ else:
27
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
28
+
29
+ result.iloc[0, 0] = 100
30
+
31
+ if using_copy_on_write:
32
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
33
+ tm.assert_frame_equal(df, df_orig)
34
+
35
+
36
+ @pytest.mark.parametrize("func", ["ffill", "bfill"])
37
+ def test_interp_fill_functions(using_copy_on_write, func):
38
+ # Check that these takes the same code paths as interpolate
39
+ df = DataFrame({"a": [1, 2]})
40
+ df_orig = df.copy()
41
+
42
+ result = getattr(df, func)()
43
+
44
+ if using_copy_on_write:
45
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
46
+ else:
47
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
48
+
49
+ result.iloc[0, 0] = 100
50
+
51
+ if using_copy_on_write:
52
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
53
+ tm.assert_frame_equal(df, df_orig)
54
+
55
+
56
+ @pytest.mark.parametrize("func", ["ffill", "bfill"])
57
+ @pytest.mark.parametrize(
58
+ "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
59
+ )
60
+ def test_interpolate_triggers_copy(using_copy_on_write, vals, func):
61
+ df = DataFrame({"a": vals})
62
+ result = getattr(df, func)()
63
+
64
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
65
+ if using_copy_on_write:
66
+ # Check that we don't have references when triggering a copy
67
+ assert result._mgr._has_no_reference(0)
68
+
69
+
70
+ @pytest.mark.parametrize(
71
+ "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
72
+ )
73
+ def test_interpolate_inplace_no_reference_no_copy(using_copy_on_write, vals):
74
+ df = DataFrame({"a": vals})
75
+ arr = get_array(df, "a")
76
+ df.interpolate(method="linear", inplace=True)
77
+
78
+ assert np.shares_memory(arr, get_array(df, "a"))
79
+ if using_copy_on_write:
80
+ # Check that we don't have references when triggering a copy
81
+ assert df._mgr._has_no_reference(0)
82
+
83
+
84
+ @pytest.mark.parametrize(
85
+ "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
86
+ )
87
+ def test_interpolate_inplace_with_refs(using_copy_on_write, vals):
88
+ df = DataFrame({"a": [1, np.nan, 2]})
89
+ df_orig = df.copy()
90
+ arr = get_array(df, "a")
91
+ view = df[:]
92
+ df.interpolate(method="linear", inplace=True)
93
+
94
+ if using_copy_on_write:
95
+ # Check that copy was triggered in interpolate and that we don't
96
+ # have any references left
97
+ assert not np.shares_memory(arr, get_array(df, "a"))
98
+ tm.assert_frame_equal(df_orig, view)
99
+ assert df._mgr._has_no_reference(0)
100
+ assert view._mgr._has_no_reference(0)
101
+ else:
102
+ assert np.shares_memory(arr, get_array(df, "a"))
103
+
104
+
105
+ def test_interpolate_cleaned_fill_method(using_copy_on_write):
106
+ # Check that "method is set to None" case works correctly
107
+ df = DataFrame({"a": ["a", np.nan, "c"], "b": 1})
108
+ df_orig = df.copy()
109
+
110
+ result = df.interpolate(method="asfreq")
111
+
112
+ if using_copy_on_write:
113
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
114
+ else:
115
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
116
+
117
+ result.iloc[0, 0] = Timestamp("2021-12-31")
118
+
119
+ if using_copy_on_write:
120
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
121
+ tm.assert_frame_equal(df, df_orig)
122
+
123
+
124
+ def test_interpolate_object_convert_no_op(using_copy_on_write):
125
+ df = DataFrame({"a": ["a", "b", "c"], "b": 1})
126
+ arr_a = get_array(df, "a")
127
+ df.interpolate(method="pad", inplace=True)
128
+
129
+ # Now CoW makes a copy, it should not!
130
+ if using_copy_on_write:
131
+ assert df._mgr._has_no_reference(0)
132
+ assert np.shares_memory(arr_a, get_array(df, "a"))
133
+
134
+
135
+ def test_interpolate_object_convert_copies(using_copy_on_write):
136
+ df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1})
137
+ arr_a = get_array(df, "a")
138
+ df.interpolate(method="pad", inplace=True)
139
+
140
+ if using_copy_on_write:
141
+ assert df._mgr._has_no_reference(0)
142
+ assert not np.shares_memory(arr_a, get_array(df, "a"))
143
+
144
+
145
+ def test_interpolate_downcast(using_copy_on_write):
146
+ df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
147
+ arr_a = get_array(df, "a")
148
+ df.interpolate(method="pad", inplace=True, downcast="infer")
149
+
150
+ if using_copy_on_write:
151
+ assert df._mgr._has_no_reference(0)
152
+ assert np.shares_memory(arr_a, get_array(df, "a"))
153
+
154
+
155
+ def test_interpolate_downcast_reference_triggers_copy(using_copy_on_write):
156
+ df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
157
+ df_orig = df.copy()
158
+ arr_a = get_array(df, "a")
159
+ view = df[:]
160
+ df.interpolate(method="pad", inplace=True, downcast="infer")
161
+
162
+ if using_copy_on_write:
163
+ assert df._mgr._has_no_reference(0)
164
+ assert not np.shares_memory(arr_a, get_array(df, "a"))
165
+ tm.assert_frame_equal(df_orig, view)
166
+ else:
167
+ tm.assert_frame_equal(df, view)
168
+
169
+
170
+ def test_fillna(using_copy_on_write):
171
+ df = DataFrame({"a": [1.5, np.nan], "b": 1})
172
+ df_orig = df.copy()
173
+
174
+ df2 = df.fillna(5.5)
175
+ if using_copy_on_write:
176
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
177
+ else:
178
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
179
+
180
+ df2.iloc[0, 1] = 100
181
+ tm.assert_frame_equal(df_orig, df)
182
+
183
+
184
+ def test_fillna_dict(using_copy_on_write):
185
+ df = DataFrame({"a": [1.5, np.nan], "b": 1})
186
+ df_orig = df.copy()
187
+
188
+ df2 = df.fillna({"a": 100.5})
189
+ if using_copy_on_write:
190
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
191
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
192
+ else:
193
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
194
+
195
+ df2.iloc[0, 1] = 100
196
+ tm.assert_frame_equal(df_orig, df)
197
+
198
+
199
@pytest.mark.parametrize("downcast", [None, False])
def test_fillna_inplace(using_copy_on_write, downcast):
    # Inplace fillna with no other references: arrays are modified in place
    # (memory still shared with the pre-call arrays) for both downcast values.
    df = DataFrame({"a": [1.5, np.nan], "b": 1})
    arr_a = get_array(df, "a")
    arr_b = get_array(df, "b")

    df.fillna(5.5, inplace=True, downcast=downcast)
    assert np.shares_memory(get_array(df, "a"), arr_a)
    assert np.shares_memory(get_array(df, "b"), arr_b)
    if using_copy_on_write:
        # df is the sole owner of both blocks
        assert df._mgr._has_no_reference(0)
        assert df._mgr._has_no_reference(1)
211
+
212
+
213
def test_fillna_inplace_reference(using_copy_on_write):
    # Inplace fillna while a view references the data: under CoW only the
    # column actually containing NaN ("a") is copied; the no-op column "b"
    # keeps sharing memory.
    df = DataFrame({"a": [1.5, np.nan], "b": 1})
    df_orig = df.copy()
    arr_a = get_array(df, "a")
    arr_b = get_array(df, "b")
    view = df[:]

    df.fillna(5.5, inplace=True)
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "a"), arr_a)
        assert np.shares_memory(get_array(df, "b"), arr_b)
        # after the copy, both objects own their "a" block exclusively
        assert view._mgr._has_no_reference(0)
        assert df._mgr._has_no_reference(0)
        tm.assert_frame_equal(view, df_orig)
    else:
        # without CoW the operation mutates the shared arrays in place
        assert np.shares_memory(get_array(df, "a"), arr_a)
        assert np.shares_memory(get_array(df, "b"), arr_b)
        expected = DataFrame({"a": [1.5, 5.5], "b": 1})
        tm.assert_frame_equal(df, expected)
232
+
233
+
234
def test_fillna_interval_inplace_reference(using_copy_on_write):
    # Same as test_fillna_inplace_reference but for IntervalDtype, whose
    # backing data lives in the ``left``/``right`` arrays of the
    # IntervalArray.
    # Set one element to NaN creating the interval array
    ser = Series(interval_range(start=0, end=5), name="a")
    ser.iloc[1] = np.nan

    ser_orig = ser.copy()
    view = ser[:]
    ser.fillna(value=Interval(left=0, right=5), inplace=True)

    if using_copy_on_write:
        # the interval's underlying "left" array was copied before mutation
        assert not np.shares_memory(
            get_array(ser, "a").left.values, get_array(view, "a").left.values
        )
        tm.assert_series_equal(view, ser_orig)
    else:
        assert np.shares_memory(
            get_array(ser, "a").left.values, get_array(view, "a").left.values
        )
251
+
252
+
253
def test_fillna_series_empty_arg(using_copy_on_write):
    # fillna with an empty dict is a no-op: under CoW the result is a lazy
    # copy sharing memory until one side is mutated.
    ser = Series([1, np.nan, 2])
    ser_orig = ser.copy()
    result = ser.fillna({})

    if using_copy_on_write:
        assert np.shares_memory(get_array(ser), get_array(result))
    else:
        assert not np.shares_memory(get_array(ser), get_array(result))

    # mutating the parent must not change the (no-op) result
    ser.iloc[0] = 100.5
    tm.assert_series_equal(ser_orig, result)
265
+
266
+
267
def test_fillna_series_empty_arg_inplace(using_copy_on_write):
    # Inplace no-op fillna (empty dict): data stays in place and, under CoW,
    # the series remains the sole owner of its block.
    ser = Series([1, np.nan, 2])
    arr = get_array(ser)
    ser.fillna({}, inplace=True)

    assert np.shares_memory(get_array(ser), arr)
    if using_copy_on_write:
        assert ser._mgr._has_no_reference(0)
275
+
276
+
277
def test_fillna_ea_noop_shares_memory(
    using_copy_on_write, any_numeric_ea_and_arrow_dtype
):
    # For extension/arrow dtypes: fillna copies the column with NA values,
    # while the NA-free column "b" may stay shared (tracked via references)
    # until df2 is mutated.
    df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype)
    df_orig = df.copy()
    df2 = df.fillna(100)

    assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))

    if using_copy_on_write:
        assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
        # "b" is still referenced by both objects
        assert not df2._mgr._has_no_reference(1)
    else:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))

    tm.assert_frame_equal(df_orig, df)

    # mutating df2 triggers the copy of the shared "b" column
    df2.iloc[0, 1] = 100
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
        assert df2._mgr._has_no_reference(1)
        assert df._mgr._has_no_reference(1)
    tm.assert_frame_equal(df_orig, df)
300
+
301
+
302
def test_fillna_inplace_ea_noop_shares_memory(
    using_copy_on_write, any_numeric_ea_and_arrow_dtype
):
    # Inplace fillna on EA/arrow dtypes with a live view: only the column
    # containing NA is detached from the view; the no-op column "b" keeps
    # sharing memory (and its references) under CoW.
    df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype)
    df_orig = df.copy()
    view = df[:]
    df.fillna(100, inplace=True)

    assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))

    if using_copy_on_write:
        assert np.shares_memory(get_array(df, "b"), get_array(view, "b"))
        assert not df._mgr._has_no_reference(1)
        assert not view._mgr._has_no_reference(1)
    else:
        assert not np.shares_memory(get_array(df, "b"), get_array(view, "b"))
    # subsequent mutation of df must leave the view holding the original data
    df.iloc[0, 1] = 100
    tm.assert_frame_equal(df_orig, view)
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py ADDED
@@ -0,0 +1,1725 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.errors import SettingWithCopyWarning
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ Index,
10
+ MultiIndex,
11
+ Period,
12
+ Series,
13
+ Timestamp,
14
+ date_range,
15
+ period_range,
16
+ )
17
+ import pandas._testing as tm
18
+ from pandas.tests.copy_view.util import get_array
19
+
20
+
21
def test_copy(using_copy_on_write):
    # A deep copy is fully independent: no shared memory, no CoW references.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_copy = df.copy()

    # the deep copy doesn't share memory
    assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
    if using_copy_on_write:
        assert not df_copy._mgr.blocks[0].refs.has_reference()
        assert not df_copy._mgr.blocks[1].refs.has_reference()

    # mutating copy doesn't mutate original
    df_copy.iloc[0, 0] = 0
    assert df.iloc[0, 0] == 1
34
+
35
+
36
def test_copy_shallow(using_copy_on_write):
    # A shallow copy shares memory; under CoW mutation detaches only the
    # touched block, without CoW mutation is visible in the original.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_copy = df.copy(deep=False)

    # the shallow copy still shares memory
    assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
    if using_copy_on_write:
        assert df_copy._mgr.blocks[0].refs.has_reference()
        assert df_copy._mgr.blocks[1].refs.has_reference()

    if using_copy_on_write:
        # mutating shallow copy doesn't mutate original
        df_copy.iloc[0, 0] = 0
        assert df.iloc[0, 0] == 1
        # mutating triggered a copy-on-write -> no longer shares memory
        assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
        # but still shares memory for the other columns/blocks
        assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c"))
    else:
        # mutating shallow copy does mutate original
        df_copy.iloc[0, 0] = 0
        assert df.iloc[0, 0] == 0
        # and still shares memory
        assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
60
+
61
+
62
@pytest.mark.parametrize("copy", [True, None, False])
@pytest.mark.parametrize(
    "method",
    [
        lambda df, copy: df.rename(columns=str.lower, copy=copy),
        lambda df, copy: df.reindex(columns=["a", "c"], copy=copy),
        lambda df, copy: df.reindex_like(df, copy=copy),
        lambda df, copy: df.align(df, copy=copy)[0],
        lambda df, copy: df.set_axis(["a", "b", "c"], axis="index", copy=copy),
        lambda df, copy: df.rename_axis(index="test", copy=copy),
        lambda df, copy: df.rename_axis(columns="test", copy=copy),
        lambda df, copy: df.astype({"b": "int64"}, copy=copy),
        # lambda df, copy: df.swaplevel(0, 0, copy=copy),
        lambda df, copy: df.swapaxes(0, 0, copy=copy),
        lambda df, copy: df.truncate(0, 5, copy=copy),
        lambda df, copy: df.infer_objects(copy=copy),
        lambda df, copy: df.to_timestamp(copy=copy),
        lambda df, copy: df.to_period(freq="D", copy=copy),
        lambda df, copy: df.tz_localize("US/Central", copy=copy),
        lambda df, copy: df.tz_convert("US/Central", copy=copy),
        lambda df, copy: df.set_flags(allows_duplicate_labels=False, copy=copy),
    ],
    ids=[
        "rename",
        "reindex",
        "reindex_like",
        "align",
        "set_axis",
        "rename_axis0",
        "rename_axis1",
        "astype",
        # "swaplevel",  # only series
        "swapaxes",
        "truncate",
        "infer_objects",
        "to_timestamp",
        "to_period",
        "tz_localize",
        "tz_convert",
        "set_flags",
    ],
)
def test_methods_copy_keyword(
    request, method, copy, using_copy_on_write, using_array_manager
):
    # Every DataFrame method with a ``copy`` keyword: the result shares
    # memory iff CoW is enabled or copy=False was requested.
    # Some methods require a specific index type to be applicable at all.
    index = None
    if "to_timestamp" in request.node.callspec.id:
        index = period_range("2012-01-01", freq="D", periods=3)
    elif "to_period" in request.node.callspec.id:
        index = date_range("2012-01-01", freq="D", periods=3)
    elif "tz_localize" in request.node.callspec.id:
        index = date_range("2012-01-01", freq="D", periods=3)
    elif "tz_convert" in request.node.callspec.id:
        index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels")

    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=index)
    df2 = method(df, copy=copy)

    share_memory = using_copy_on_write or copy is False

    if request.node.callspec.id.startswith("reindex-"):
        # TODO copy=False without CoW still returns a copy in this case
        if not using_copy_on_write and not using_array_manager and copy is False:
            share_memory = False

    if share_memory:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
131
+
132
+
133
@pytest.mark.parametrize("copy", [True, None, False])
@pytest.mark.parametrize(
    "method",
    [
        lambda ser, copy: ser.rename(index={0: 100}, copy=copy),
        lambda ser, copy: ser.rename(None, copy=copy),
        lambda ser, copy: ser.reindex(index=ser.index, copy=copy),
        lambda ser, copy: ser.reindex_like(ser, copy=copy),
        lambda ser, copy: ser.align(ser, copy=copy)[0],
        lambda ser, copy: ser.set_axis(["a", "b", "c"], axis="index", copy=copy),
        lambda ser, copy: ser.rename_axis(index="test", copy=copy),
        lambda ser, copy: ser.astype("int64", copy=copy),
        lambda ser, copy: ser.swaplevel(0, 1, copy=copy),
        lambda ser, copy: ser.swapaxes(0, 0, copy=copy),
        lambda ser, copy: ser.truncate(0, 5, copy=copy),
        lambda ser, copy: ser.infer_objects(copy=copy),
        lambda ser, copy: ser.to_timestamp(copy=copy),
        lambda ser, copy: ser.to_period(freq="D", copy=copy),
        lambda ser, copy: ser.tz_localize("US/Central", copy=copy),
        lambda ser, copy: ser.tz_convert("US/Central", copy=copy),
        lambda ser, copy: ser.set_flags(allows_duplicate_labels=False, copy=copy),
    ],
    ids=[
        "rename (dict)",
        "rename",
        "reindex",
        "reindex_like",
        "align",
        "set_axis",
        "rename_axis0",
        "astype",
        "swaplevel",
        "swapaxes",
        "truncate",
        "infer_objects",
        "to_timestamp",
        "to_period",
        "tz_localize",
        "tz_convert",
        "set_flags",
    ],
)
def test_methods_series_copy_keyword(request, method, copy, using_copy_on_write):
    # Series counterpart of test_methods_copy_keyword: the result shares
    # memory iff CoW is enabled or copy=False was requested.
    # Some methods require a specific index type to be applicable at all.
    index = None
    if "to_timestamp" in request.node.callspec.id:
        index = period_range("2012-01-01", freq="D", periods=3)
    elif "to_period" in request.node.callspec.id:
        index = date_range("2012-01-01", freq="D", periods=3)
    elif "tz_localize" in request.node.callspec.id:
        index = date_range("2012-01-01", freq="D", periods=3)
    elif "tz_convert" in request.node.callspec.id:
        index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels")
    elif "swaplevel" in request.node.callspec.id:
        index = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]])

    ser = Series([1, 2, 3], index=index)
    ser2 = method(ser, copy=copy)

    share_memory = using_copy_on_write or copy is False

    if share_memory:
        assert np.shares_memory(get_array(ser2), get_array(ser))
    else:
        assert not np.shares_memory(get_array(ser2), get_array(ser))
197
+
198
+
199
@pytest.mark.parametrize("copy", [True, None, False])
def test_transpose_copy_keyword(using_copy_on_write, copy, using_array_manager):
    # transpose of a single-dtype frame can be a view; the array manager
    # always copies, hence the extra condition.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    result = df.transpose(copy=copy)
    share_memory = using_copy_on_write or copy is False or copy is None
    share_memory = share_memory and not using_array_manager

    if share_memory:
        assert np.shares_memory(get_array(df, "a"), get_array(result, 0))
    else:
        assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
210
+
211
+
212
+ # -----------------------------------------------------------------------------
213
+ # DataFrame methods returning new DataFrame using shallow copy
214
+
215
+
216
def test_reset_index(using_copy_on_write):
    # Case: resetting the index (i.e. adding a new column) + mutating the
    # resulting dataframe
    df = DataFrame(
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12]
    )
    df_orig = df.copy()
    df2 = df.reset_index()
    # sanity check that the new manager layout is consistent
    df2._mgr._verify_integrity()

    if using_copy_on_write:
        # still shares memory (df2 is a shallow copy)
        assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    # mutating df2 triggers a copy-on-write for that column / block
    df2.iloc[0, 2] = 0
    assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
    if using_copy_on_write:
        # untouched column "c" still shares memory
        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    tm.assert_frame_equal(df, df_orig)
236
+
237
+
238
@pytest.mark.parametrize("index", [pd.RangeIndex(0, 2), Index([1, 2])])
def test_reset_index_series_drop(using_copy_on_write, index):
    # Series.reset_index(drop=True) keeps the same values: under CoW it is a
    # lazy copy (shared memory + tracked reference) until mutated.
    ser = Series([1, 2], index=index)
    ser_orig = ser.copy()
    ser2 = ser.reset_index(drop=True)
    if using_copy_on_write:
        assert np.shares_memory(get_array(ser), get_array(ser2))
        assert not ser._mgr._has_no_reference(0)
    else:
        assert not np.shares_memory(get_array(ser), get_array(ser2))

    # mutating the result must leave the parent unchanged
    ser2.iloc[0] = 100
    tm.assert_series_equal(ser, ser_orig)
251
+
252
+
253
def test_rename_columns(using_copy_on_write):
    # Case: renaming columns returns a new dataframe
    # + afterwards modifying the result
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.rename(columns=str.upper)

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    # mutating df2 triggers a copy-on-write for that column
    df2.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    if using_copy_on_write:
        # the untouched block ("c") still shares memory
        assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
    expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]})
    tm.assert_frame_equal(df2, expected)
    tm.assert_frame_equal(df, df_orig)
269
+
270
+
271
def test_rename_columns_modify_parent(using_copy_on_write):
    # Case: renaming columns returns a new dataframe
    # + afterwards modifying the original (parent) dataframe
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df2 = df.rename(columns=str.upper)
    df2_orig = df2.copy()

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    # mutating the parent triggers a copy-on-write for that column
    df.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    if using_copy_on_write:
        # the untouched block ("c") still shares memory
        assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
    expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    tm.assert_frame_equal(df, expected)
    tm.assert_frame_equal(df2, df2_orig)
289
+
290
+
291
def test_pipe(using_copy_on_write):
    # pipe with an identity function returns the same data; mutating the
    # result must not modify the parent under CoW.
    df = DataFrame({"a": [1, 2, 3], "b": 1.5})
    df_orig = df.copy()

    def testfunc(df):
        return df

    df2 = df.pipe(testfunc)

    assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating df2 triggers a copy-on-write for that column
    df2.iloc[0, 0] = 0
    if using_copy_on_write:
        tm.assert_frame_equal(df, df_orig)
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        # without CoW, df2 *is* df, so the parent is mutated too
        expected = DataFrame({"a": [0, 2, 3], "b": 1.5})
        tm.assert_frame_equal(df, expected)

        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
        assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
313
+
314
+
315
def test_pipe_modify_df(using_copy_on_write):
    # pipe with a function that mutates its argument: pipe passes the object
    # itself, so under CoW the mutation inside testfunc triggers the copy.
    df = DataFrame({"a": [1, 2, 3], "b": 1.5})
    df_orig = df.copy()

    def testfunc(df):
        df.iloc[0, 0] = 100
        return df

    df2 = df.pipe(testfunc)

    assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))

    if using_copy_on_write:
        tm.assert_frame_equal(df, df_orig)
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        # without CoW, df2 *is* df, so the parent reflects the mutation
        expected = DataFrame({"a": [100, 2, 3], "b": 1.5})
        tm.assert_frame_equal(df, expected)

        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
        assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
336
+
337
+
338
def test_reindex_columns(using_copy_on_write):
    # Case: reindexing the column returns a new dataframe
    # + afterwards modifying the result
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.reindex(columns=["a", "c"])

    if using_copy_on_write:
        # still shares memory (df2 is a shallow copy)
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    # mutating df2 triggers a copy-on-write for that column
    df2.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    if using_copy_on_write:
        # untouched column "c" still shares memory
        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    tm.assert_frame_equal(df, df_orig)
356
+
357
+
358
def test_drop_on_column(using_copy_on_write):
    # Dropping a column: under CoW the remaining columns share memory with
    # the parent until the result is mutated.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.drop(columns="a")
    # sanity check that the new manager layout is consistent
    df2._mgr._verify_integrity()

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    else:
        assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
        assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    # mutating df2 triggers a copy-on-write for the touched block only
    df2.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    tm.assert_frame_equal(df, df_orig)
375
+
376
+
377
def test_select_dtypes(using_copy_on_write):
    # Case: selecting columns using `select_dtypes()` returns a new dataframe
    # + afterwards modifying the result
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.select_dtypes("int64")
    # sanity check that the new manager layout is consistent
    df2._mgr._verify_integrity()

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating df2 triggers a copy-on-write for that column/block
    df2.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
395
+
396
+
397
@pytest.mark.parametrize(
    "filter_kwargs", [{"items": ["a"]}, {"like": "a"}, {"regex": "a"}]
)
def test_filter(using_copy_on_write, filter_kwargs):
    # Case: selecting columns using `filter()` returns a new dataframe
    # + afterwards modifying the result
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.filter(**filter_kwargs)
    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating df2 triggers a copy-on-write for that column/block
    if using_copy_on_write:
        df2.iloc[0, 0] = 0
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
        tm.assert_frame_equal(df, df_orig)
416
+
417
+
418
def test_shift_no_op(using_copy_on_write):
    # shift(periods=0) is a no-op: under CoW the result is a lazy copy that
    # keeps the original values when the parent is later mutated.
    df = DataFrame(
        [[1, 2], [3, 4], [5, 6]],
        index=date_range("2020-01-01", "2020-01-03"),
        columns=["a", "b"],
    )
    df_orig = df.copy()
    df2 = df.shift(periods=0)

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating the parent must not change df2
    df.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
    tm.assert_frame_equal(df2, df_orig)
436
+
437
+
438
def test_shift_index(using_copy_on_write):
    # A real row shift produces new values, so the result never shares
    # memory with the parent.
    df = DataFrame(
        [[1, 2], [3, 4], [5, 6]],
        index=date_range("2020-01-01", "2020-01-03"),
        columns=["a", "b"],
    )
    df2 = df.shift(periods=1, axis=0)

    assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
447
+
448
+
449
def test_shift_rows_freq(using_copy_on_write):
    # shift with ``freq`` moves the index, not the values, so under CoW the
    # data can be shared lazily.
    df = DataFrame(
        [[1, 2], [3, 4], [5, 6]],
        index=date_range("2020-01-01", "2020-01-03"),
        columns=["a", "b"],
    )
    df_orig = df.copy()
    # expected result: same values, index shifted by one day
    df_orig.index = date_range("2020-01-02", "2020-01-04")
    df2 = df.shift(periods=1, freq="1D")

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating the parent must not change df2
    df.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
    tm.assert_frame_equal(df2, df_orig)
468
+
469
+
470
def test_shift_columns(using_copy_on_write):
    # Shifting along the columns moves column "2020-01-01"'s data into
    # "2020-01-02" without copying; mutating the parent afterwards must
    # trigger the copy under CoW.
    df = DataFrame(
        [[1, 2], [3, 4], [5, 6]], columns=date_range("2020-01-01", "2020-01-02")
    )
    df2 = df.shift(periods=1, axis=1)

    assert np.shares_memory(get_array(df2, "2020-01-02"), get_array(df, "2020-01-01"))
    df.iloc[0, 1] = 0
    if using_copy_on_write:
        assert not np.shares_memory(
            get_array(df2, "2020-01-02"), get_array(df, "2020-01-01")
        )
        expected = DataFrame(
            [[np.nan, 1], [np.nan, 3], [np.nan, 5]],
            columns=date_range("2020-01-01", "2020-01-02"),
        )
        tm.assert_frame_equal(df2, expected)
487
+
488
+
489
def test_pop(using_copy_on_write):
    # pop removes a column and returns it as a Series; both the popped
    # series and the remaining columns still share memory with a
    # pre-existing view.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    view_original = df[:]
    result = df.pop("a")

    assert np.shares_memory(result.values, get_array(view_original, "a"))
    assert np.shares_memory(get_array(df, "b"), get_array(view_original, "b"))

    if using_copy_on_write:
        # mutating the popped series triggers its own copy
        result.iloc[0] = 0
        assert not np.shares_memory(result.values, get_array(view_original, "a"))
    df.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "b"), get_array(view_original, "b"))
        tm.assert_frame_equal(view_original, df_orig)
    else:
        # without CoW the view is mutated together with df
        expected = DataFrame({"a": [1, 2, 3], "b": [0, 5, 6], "c": [0.1, 0.2, 0.3]})
        tm.assert_frame_equal(view_original, expected)
508
+
509
+
510
@pytest.mark.parametrize(
    "func",
    [
        lambda x, y: x.align(y),
        lambda x, y: x.align(y.a, axis=0),
        lambda x, y: x.align(y.a.iloc[slice(0, 1)], axis=1),
    ],
)
def test_align_frame(using_copy_on_write, func):
    # align against a reordered copy: the aligned result shares memory under
    # CoW and copies on mutation.
    df = DataFrame({"a": [1, 2, 3], "b": "a"})
    df_orig = df.copy()
    df_changed = df[["b", "a"]].copy()
    df2, _ = func(df, df_changed)

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating the aligned result must leave the parent unchanged
    df2.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
533
+
534
+
535
def test_align_series(using_copy_on_write):
    # Series.align returns both aligned objects; under CoW each shares
    # memory with its input and detaches on mutation.
    ser = Series([1, 2])
    ser_orig = ser.copy()
    ser_other = ser.copy()
    ser2, ser_other_result = ser.align(ser_other)

    if using_copy_on_write:
        assert np.shares_memory(ser2.values, ser.values)
        assert np.shares_memory(ser_other_result.values, ser_other.values)
    else:
        assert not np.shares_memory(ser2.values, ser.values)
        assert not np.shares_memory(ser_other_result.values, ser_other.values)

    # mutating the aligned results must leave the parents unchanged
    ser2.iloc[0] = 0
    ser_other_result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(ser2.values, ser.values)
        assert not np.shares_memory(ser_other_result.values, ser_other.values)
        tm.assert_series_equal(ser, ser_orig)
        tm.assert_series_equal(ser_other, ser_orig)
555
+
556
+
557
def test_align_copy_false(using_copy_on_write):
    # align(copy=False) shares memory; under CoW mutating either aligned
    # result must still leave the original untouched.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df_orig = df.copy()
    df2, df3 = df.align(df, copy=False)

    assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
    assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))

    if using_copy_on_write:
        df2.loc[0, "a"] = 0
        tm.assert_frame_equal(df, df_orig)  # Original is unchanged

        df3.loc[0, "a"] = 0
        tm.assert_frame_equal(df, df_orig)  # Original is unchanged
571
+
572
+
573
def test_align_with_series_copy_false(using_copy_on_write):
    # Aligning a frame with a series (copy=False): both aligned outputs
    # share memory with their inputs; CoW protects both originals.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    ser = Series([1, 2, 3], name="x")
    ser_orig = ser.copy()
    df_orig = df.copy()
    df2, ser2 = df.align(ser, copy=False, axis=0)

    assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
    assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
    assert np.shares_memory(get_array(ser, "x"), get_array(ser2, "x"))

    if using_copy_on_write:
        df2.loc[0, "a"] = 0
        tm.assert_frame_equal(df, df_orig)  # Original is unchanged

        ser2.loc[0] = 0
        tm.assert_series_equal(ser, ser_orig)  # Original is unchanged
590
+
591
+
592
def test_to_frame(using_copy_on_write):
    # Case: converting a Series to a DataFrame with to_frame
    ser = Series([1, 2, 3])
    ser_orig = ser.copy()

    # ser[:] adds an extra reference before the conversion
    df = ser[:].to_frame()

    # currently this always returns a "view"
    assert np.shares_memory(ser.values, get_array(df, 0))

    df.iloc[0, 0] = 0

    if using_copy_on_write:
        # mutating df triggers a copy-on-write for that column
        assert not np.shares_memory(ser.values, get_array(df, 0))
        tm.assert_series_equal(ser, ser_orig)
    else:
        # but currently select_dtypes() actually returns a view -> mutates parent
        expected = ser_orig.copy()
        expected.iloc[0] = 0
        tm.assert_series_equal(ser, expected)

    # modify original series -> don't modify dataframe
    df = ser[:].to_frame()
    ser.iloc[0] = 0

    if using_copy_on_write:
        tm.assert_frame_equal(df, ser_orig.to_frame())
    else:
        expected = ser_orig.copy().to_frame()
        expected.iloc[0, 0] = 0
        tm.assert_frame_equal(df, expected)
624
+
625
+
626
@pytest.mark.parametrize("ax", ["index", "columns"])
def test_swapaxes_noop(using_copy_on_write, ax):
    # Swapping an axis with itself is a no-op: under CoW the result shares
    # memory and copies lazily on mutation.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df_orig = df.copy()
    df2 = df.swapaxes(ax, ax)

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))

    # mutating df2 triggers a copy-on-write for that column/block
    df2.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
642
+
643
+
644
def test_swapaxes_single_block(using_copy_on_write):
    # Transpose-like swap of a single-dtype frame: column "x" of the result
    # maps onto the original "a" column's data.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"])
    df_orig = df.copy()
    df2 = df.swapaxes("index", "columns")

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "x"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a"))

    # mutating df2 triggers a copy-on-write for that column/block
    df2.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
659
+
660
+
661
def test_swapaxes_read_only_array():
    """Mutating a swapaxes result backed by a read-only array must not raise."""
    df = DataFrame({"a": [1, 2], "b": 3})
    transposed = df.swapaxes(axis1="index", axis2="columns")
    transposed.iloc[0, 0] = 100
    expected = DataFrame({0: [100, 3], 1: [2, 3]}, index=["a", "b"])
    tm.assert_frame_equal(transposed, expected)
667
+
668
+
669
@pytest.mark.parametrize(
    "method, idx",
    [
        (lambda df: df.copy(deep=False).copy(deep=False), 0),
        (lambda df: df.reset_index().reset_index(), 2),
        (lambda df: df.rename(columns=str.upper).rename(columns=str.lower), 0),
        (lambda df: df.copy(deep=False).select_dtypes(include="number"), 0),
    ],
    ids=["shallow-copy", "reset_index", "rename", "select_dtypes"],
)
def test_chained_methods(request, method, idx, using_copy_on_write):
    """Chaining two lazy-copy methods keeps parent and child independent."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()

    # when not using CoW, only the chained shallow copies give back a view
    result_is_view = not using_copy_on_write and (
        request.node.callspec.id == "shallow-copy"
    )

    # modify the result -> must not modify df
    result = method(df)
    result.iloc[0, idx] = 0
    if not result_is_view:
        tm.assert_frame_equal(df, df_orig)

    # modify df -> must not modify the result
    result = method(df)
    df.iloc[0, 0] = 0
    if not result_is_view:
        tm.assert_frame_equal(result.iloc[:, idx:], df_orig)
697
+
698
+
699
@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})])
def test_to_timestamp(using_copy_on_write, obj):
    """to_timestamp is a lazy copy under CoW and an eager copy otherwise."""
    obj.index = Index([Period("2012-1-1", freq="D"), Period("2012-1-2", freq="D")])

    obj_orig = obj.copy()
    result = obj.to_timestamp()

    shares = np.shares_memory(get_array(result, "a"), get_array(obj, "a"))
    assert shares == using_copy_on_write

    # writing into the result must never be visible in obj
    result.iloc[0] = 0
    assert not np.shares_memory(get_array(result, "a"), get_array(obj, "a"))
    tm.assert_equal(obj, obj_orig)
715
+
716
+
717
@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})])
def test_to_period(using_copy_on_write, obj):
    """to_period is a lazy copy under CoW and an eager copy otherwise."""
    obj.index = Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")])

    obj_orig = obj.copy()
    result = obj.to_period(freq="Y")

    shares = np.shares_memory(get_array(result, "a"), get_array(obj, "a"))
    assert shares == using_copy_on_write

    # writing into the result must never be visible in obj
    result.iloc[0] = 0
    assert not np.shares_memory(get_array(result, "a"), get_array(obj, "a"))
    tm.assert_equal(obj, obj_orig)
733
+
734
+
735
def test_set_index(using_copy_on_write):
    """set_index returns a lazy copy of the remaining columns under CoW (GH 49473)."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    result = df.set_index("a")

    shares = np.shares_memory(get_array(result, "b"), get_array(df, "b"))
    assert shares == using_copy_on_write

    # writing into the result splits it off from df for that block
    result.iloc[0, 1] = 0
    assert not np.shares_memory(get_array(result, "c"), get_array(df, "c"))
    tm.assert_frame_equal(df, df_orig)
750
+
751
+
752
def test_set_index_mutating_parent_does_not_mutate_index():
    """The index created from column "a" is detached from the parent frame."""
    df = DataFrame({"a": [1, 2, 3], "b": 1})
    result = df.set_index("a")
    expected = result.copy()

    # overwrite the source column; the derived index must keep its old values
    df.iloc[0, 0] = 100
    tm.assert_frame_equal(result, expected)
759
+
760
+
761
def test_add_prefix(using_copy_on_write):
    """add_prefix relabels lazily under CoW; mutation copies per block (GH 49473)."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    result = df.add_prefix("CoW_")

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "CoW_a"), get_array(df, "a"))
    result.iloc[0, 0] = 0

    assert not np.shares_memory(get_array(result, "CoW_a"), get_array(df, "a"))

    if using_copy_on_write:
        # the untouched float block still shares its buffer with df
        assert np.shares_memory(get_array(result, "CoW_c"), get_array(df, "c"))
    expected = DataFrame(
        {"CoW_a": [0, 2, 3], "CoW_b": [4, 5, 6], "CoW_c": [0.1, 0.2, 0.3]}
    )
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(df, df_orig)
780
+
781
+
782
def test_add_suffix(using_copy_on_write):
    """add_suffix relabels lazily under CoW; mutation copies per block (GH 49473)."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    result = df.add_suffix("_CoW")
    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "a_CoW"), get_array(df, "a"))
    result.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(result, "a_CoW"), get_array(df, "a"))
    if using_copy_on_write:
        # the untouched float block still shares its buffer with df
        assert np.shares_memory(get_array(result, "c_CoW"), get_array(df, "c"))
    expected = DataFrame(
        {"a_CoW": [0, 2, 3], "b_CoW": [4, 5, 6], "c_CoW": [0.1, 0.2, 0.3]}
    )
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(df, df_orig)
798
+
799
+
800
@pytest.mark.parametrize("axis, val", [(0, 5.5), (1, np.nan)])
def test_dropna(using_copy_on_write, axis, val):
    """dropna keeps sharing the parent's buffers until the result is mutated."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, val, 6], "c": "d"})
    df_orig = df.copy()
    result = df.dropna(axis=axis)

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must detach it from df under CoW
    result.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
815
+
816
+
817
@pytest.mark.parametrize("val", [5, 5.5])
def test_dropna_series(using_copy_on_write, val):
    """Series.dropna with nothing to drop is a lazy copy under CoW."""
    ser = Series([1, val, 4])
    ser_orig = ser.copy()
    result = ser.dropna()

    assert np.shares_memory(result.values, ser.values) == using_copy_on_write

    # writing into the result must detach it from ser under CoW
    result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(result.values, ser.values)
    tm.assert_series_equal(ser, ser_orig)
832
+
833
+
834
@pytest.mark.parametrize(
    "method",
    [
        lambda df: df.head(),
        lambda df: df.head(2),
        lambda df: df.tail(),
        lambda df: df.tail(3),
    ],
)
def test_head_tail(method, using_copy_on_write):
    """head/tail are views without CoW and lazy copies with CoW."""
    df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    result = method(df)
    result._mgr._verify_integrity()

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
        assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))

    # writing into the result triggers a copy-on-write for that block only
    result.iloc[0, 0] = 0
    assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    else:
        # without CoW head/tail return views; the write hit df, so undo it
        result.iloc[0, 0] = 1
    tm.assert_frame_equal(df, df_orig)
862
+
863
+
864
def test_infer_objects(using_copy_on_write):
    """infer_objects shares unconverted columns lazily under CoW."""
    df = DataFrame({"a": [1, 2], "b": "c", "c": 1, "d": "x"})
    df_orig = df.copy()
    result = df.infer_objects()

    for col in ("a", "b"):
        shares = np.shares_memory(get_array(result, col), get_array(df, col))
        assert shares == using_copy_on_write

    # the writes below must detach the mutated blocks under CoW
    result.iloc[0, 0] = 0
    result.iloc[0, 1] = "d"
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
    tm.assert_frame_equal(df, df_orig)
883
+
884
+
885
def test_infer_objects_no_reference(using_copy_on_write):
    """With no other reference, infer_objects can mostly mutate in place."""
    df = DataFrame(
        {
            "a": [1, 2],
            "b": "c",
            "c": 1,
            "d": Series(
                [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object"
            ),
            "e": "b",
        }
    )
    df = df.infer_objects()

    a_before = get_array(df, "a")
    b_before = get_array(df, "b")
    d_before = get_array(df, "d")

    df.iloc[0, 0] = 0
    df.iloc[0, 1] = "d"
    df.iloc[0, 3] = Timestamp("2018-12-31")
    if using_copy_on_write:
        assert np.shares_memory(a_before, get_array(df, "a"))
        # TODO(CoW): Block splitting causes references here
        assert not np.shares_memory(b_before, get_array(df, "b"))
        assert np.shares_memory(d_before, get_array(df, "d"))
911
+
912
+
913
def test_infer_objects_reference(using_copy_on_write):
    """With a live reference, writes after infer_objects must copy under CoW."""
    df = DataFrame(
        {
            "a": [1, 2],
            "b": "c",
            "c": 1,
            "d": Series(
                [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object"
            ),
        }
    )
    view = df[:]  # noqa: F841
    df = df.infer_objects()

    a_before = get_array(df, "a")
    b_before = get_array(df, "b")
    d_before = get_array(df, "d")

    df.iloc[0, 0] = 0
    df.iloc[0, 1] = "d"
    df.iloc[0, 3] = Timestamp("2018-12-31")
    if using_copy_on_write:
        assert not np.shares_memory(a_before, get_array(df, "a"))
        assert not np.shares_memory(b_before, get_array(df, "b"))
        assert np.shares_memory(d_before, get_array(df, "d"))
938
+
939
+
940
@pytest.mark.parametrize(
    "kwargs",
    [
        {"before": "a", "after": "b", "axis": 1},
        {"before": 0, "after": 1, "axis": 0},
    ],
)
def test_truncate(using_copy_on_write, kwargs):
    """truncate is a lazy copy under CoW and an eager copy otherwise."""
    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2})
    df_orig = df.copy()
    result = df.truncate(**kwargs)
    result._mgr._verify_integrity()

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must detach it from df under CoW
    result.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
962
+
963
+
964
@pytest.mark.parametrize("method", ["assign", "drop_duplicates"])
def test_assign_drop_duplicates(using_copy_on_write, method):
    """A no-op assign/drop_duplicates shares data lazily under CoW."""
    df = DataFrame({"a": [1, 2, 3]})
    df_orig = df.copy()
    result = getattr(df, method)()
    result._mgr._verify_integrity()

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must detach it from df under CoW
    result.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
980
+
981
+
982
@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])
def test_take(using_copy_on_write, obj):
    """Taking all rows in the original order makes no copy under CoW."""
    obj_orig = obj.copy()
    result = obj.take([0, 1])

    assert np.shares_memory(result.values, obj.values) == using_copy_on_write

    # writing into the result must detach it from obj under CoW
    result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(result.values, obj.values)
    tm.assert_equal(obj, obj_orig)
997
+
998
+
999
@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])
def test_between_time(using_copy_on_write, obj):
    """between_time selecting all rows shares memory lazily under CoW."""
    obj.index = date_range("2018-04-09", periods=2, freq="1D20min")
    obj_orig = obj.copy()
    result = obj.between_time("0:00", "1:00")

    assert np.shares_memory(result.values, obj.values) == using_copy_on_write

    # writing into the result must detach it from obj under CoW
    result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(result.values, obj.values)
    tm.assert_equal(obj, obj_orig)
1014
+
1015
+
1016
def test_reindex_like(using_copy_on_write):
    """reindex_like with matching labels is a lazy copy under CoW."""
    df = DataFrame({"a": [1, 2], "b": "a"})
    other = DataFrame({"b": "a", "a": [1, 2]})

    df_orig = df.copy()
    result = df.reindex_like(other)

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must detach it from df under CoW
    result.iloc[0, 1] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
1032
+
1033
+
1034
def test_sort_index(using_copy_on_write):
    """A no-op sort_index shares the buffer lazily under CoW (GH 49473)."""
    ser = Series([1, 2, 3])
    ser_orig = ser.copy()
    result = ser.sort_index()

    assert np.shares_memory(ser.values, result.values) == using_copy_on_write

    # mutating the *result* triggers a copy-on-write for the column / block
    result.iloc[0] = 0
    assert not np.shares_memory(result.values, ser.values)
    tm.assert_series_equal(ser, ser_orig)
1049
+
1050
+
1051
@pytest.mark.parametrize(
    "obj, kwargs",
    [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})],
)
def test_sort_values(using_copy_on_write, obj, kwargs):
    """A no-op sort_values shares the buffer lazily under CoW."""
    obj_orig = obj.copy()
    result = obj.sort_values(**kwargs)

    shares = np.shares_memory(get_array(result, "a"), get_array(obj, "a"))
    assert shares == using_copy_on_write

    # writing into the result must never be visible in obj
    result.iloc[0] = 0
    assert not np.shares_memory(get_array(result, "a"), get_array(obj, "a"))
    tm.assert_equal(obj, obj_orig)
1068
+
1069
+
1070
@pytest.mark.parametrize(
    "obj, kwargs",
    [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})],
)
def test_sort_values_inplace(using_copy_on_write, obj, kwargs, using_array_manager):
    """Inplace sort keeps the buffer; a later write must respect the view."""
    obj_orig = obj.copy()
    view = obj[:]
    obj.sort_values(inplace=True, **kwargs)

    assert np.shares_memory(get_array(obj, "a"), get_array(view, "a"))

    # writing into obj detaches it from the view only under CoW
    obj.iloc[0] = 0
    shares = np.shares_memory(get_array(obj, "a"), get_array(view, "a"))
    if using_copy_on_write:
        assert not shares
        tm.assert_equal(view, obj_orig)
    else:
        assert shares
1088
+
1089
+
1090
@pytest.mark.parametrize("decimals", [-1, 0, 1])
def test_round(using_copy_on_write, decimals):
    """round shares non-numeric (and no-op numeric) columns lazily under CoW."""
    df = DataFrame({"a": [1, 2], "b": "c"})
    df_orig = df.copy()
    result = df.round(decimals=decimals)

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))
        # TODO: Make inplace by using out parameter of ndarray.round?
        if decimals >= 0:
            # Ensure lazy copy if no-op
            assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
        else:
            assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))

    # the writes below must detach both blocks under CoW
    result.iloc[0, 1] = "d"
    result.iloc[0, 0] = 4
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
1113
+
1114
+
1115
def test_reorder_levels(using_copy_on_write):
    """reorder_levels is a lazy copy under CoW."""
    index = MultiIndex.from_tuples(
        [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"]
    )
    df = DataFrame({"a": [1, 2, 3, 4]}, index=index)
    df_orig = df.copy()
    result = df.reorder_levels(order=["two", "one"])

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must detach it from df under CoW
    result.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
1132
+
1133
+
1134
def test_series_reorder_levels(using_copy_on_write):
    """Series.reorder_levels is a lazy copy under CoW."""
    index = MultiIndex.from_tuples(
        [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"]
    )
    ser = Series([1, 2, 3, 4], index=index)
    ser_orig = ser.copy()
    result = ser.reorder_levels(order=["two", "one"])

    assert np.shares_memory(result.values, ser.values) == using_copy_on_write

    # writing into the result must detach it from ser under CoW
    result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(result.values, ser.values)
    tm.assert_series_equal(ser, ser_orig)
1151
+
1152
+
1153
@pytest.mark.parametrize("obj", [Series([1, 2, 3]), DataFrame({"a": [1, 2, 3]})])
def test_swaplevel(using_copy_on_write, obj):
    """swaplevel is a lazy copy under CoW."""
    index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"])
    obj.index = index
    obj_orig = obj.copy()
    result = obj.swaplevel()

    assert np.shares_memory(result.values, obj.values) == using_copy_on_write

    # writing into the result must detach it from obj under CoW
    result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(result.values, obj.values)
    tm.assert_equal(obj, obj_orig)
1169
+
1170
+
1171
def test_frame_set_axis(using_copy_on_write):
    """DataFrame.set_axis is a lazy copy under CoW (GH 49473)."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    result = df.set_axis(["a", "b", "c"], axis="index")

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must never be visible in df
    result.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
1186
+
1187
+
1188
def test_series_set_axis(using_copy_on_write):
    """Series.set_axis is a lazy copy under CoW (GH 49473)."""
    ser = Series([1, 2, 3])
    ser_orig = ser.copy()
    result = ser.set_axis(["a", "b", "c"], axis="index")

    assert np.shares_memory(ser, result) == using_copy_on_write

    # writing into the result must never be visible in ser
    result.iloc[0] = 0
    assert not np.shares_memory(result, ser)
    tm.assert_series_equal(ser, ser_orig)
1203
+
1204
+
1205
def test_set_flags(using_copy_on_write):
    """set_flags returns a shallow copy; a write splits it only under CoW."""
    ser = Series([1, 2, 3])
    ser_orig = ser.copy()
    result = ser.set_flags(allows_duplicate_labels=False)

    assert np.shares_memory(ser, result)

    # writing into the result triggers CoW, or writes through without it
    result.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(result, ser)
        tm.assert_series_equal(ser, ser_orig)
    else:
        assert np.shares_memory(result, ser)
        expected = Series([0, 2, 3])
        tm.assert_series_equal(ser, expected)
1221
+
1222
+
1223
@pytest.mark.parametrize("kwargs", [{"mapper": "test"}, {"index": "test"}])
def test_rename_axis(using_copy_on_write, kwargs):
    """rename_axis shares column data lazily under CoW."""
    df = DataFrame({"a": [1, 2, 3, 4]}, index=Index([1, 2, 3, 4], name="a"))
    df_orig = df.copy()
    result = df.rename_axis(**kwargs)

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must detach it from df under CoW
    result.iloc[0, 0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
1238
+
1239
+
1240
@pytest.mark.parametrize(
    "func, tz", [("tz_convert", "Europe/Berlin"), ("tz_localize", None)]
)
def test_tz_convert_localize(using_copy_on_write, func, tz):
    """tz_convert/tz_localize share the values lazily under CoW (GH 49473)."""
    ser = Series(
        [1, 2], index=date_range(start="2014-08-01 09:00", freq="H", periods=2, tz=tz)
    )
    ser_orig = ser.copy()
    result = getattr(ser, func)("US/Central")

    assert np.shares_memory(ser.values, result.values) == using_copy_on_write

    # writing into the result must never be visible in ser
    result.iloc[0] = 0
    assert not np.shares_memory(result.values, ser.values)
    tm.assert_series_equal(ser, ser_orig)
1260
+
1261
+
1262
def test_droplevel(using_copy_on_write):
    """droplevel is a lazy copy under CoW (GH 49473)."""
    index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"])
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=index)
    df_orig = df.copy()
    result = df.droplevel(0)

    shares = np.shares_memory(get_array(result, "c"), get_array(df, "c"))
    assert shares == using_copy_on_write

    # writing into the result must never be visible in df
    result.iloc[0, 0] = 0

    assert not np.shares_memory(get_array(result, "c"), get_array(df, "c"))
    tm.assert_frame_equal(df, df_orig)
1279
+
1280
+
1281
def test_squeeze(using_copy_on_write):
    """squeeze() of a one-column frame is always a view on the column."""
    df = DataFrame({"a": [1, 2, 3]})
    df_orig = df.copy()
    squeezed = df.squeeze()

    # squeeze is just an iloc, so it shares memory with or without CoW
    assert np.shares_memory(squeezed.values, get_array(df, "a"))

    squeezed.iloc[0] = 0
    if using_copy_on_write:
        # the write detached the series from the frame
        assert not np.shares_memory(squeezed.values, get_array(df, "a"))
        tm.assert_frame_equal(df, df_orig)
    else:
        # without CoW the write goes straight through to df
        assert np.shares_memory(squeezed.values, get_array(df, "a"))
        assert df.loc[0, "a"] == 0
1298
+
1299
+
1300
def test_items(using_copy_on_write):
    """Series yielded by items() share memory; writing to them triggers CoW."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
    df_orig = df.copy()

    # run twice so the second pass exercises the item cache as well
    for _ in range(2):
        for name, col in df.items():
            assert np.shares_memory(get_array(col, name), get_array(df, name))

            # writing into the column series must not touch df under CoW
            col.iloc[0] = 0

            if using_copy_on_write:
                assert not np.shares_memory(get_array(col, name), get_array(df, name))
                tm.assert_frame_equal(df, df_orig)
            else:
                # without CoW the parent frame is mutated through the view
                assert df.loc[0, name] == 0
1319
+
1320
+
1321
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_putmask(using_copy_on_write, dtype):
    """Masked assignment on a referenced frame must not write through under CoW."""
    df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)
    view = df[:]
    df_orig = df.copy()
    df[df == df] = 5

    shares = np.shares_memory(get_array(view, "a"), get_array(df, "a"))
    if using_copy_on_write:
        assert not shares
        tm.assert_frame_equal(view, df_orig)
    else:
        # without CoW the putmask wrote through the shared buffer
        assert shares
        assert view.iloc[0, 0] == 5
1335
+
1336
+
1337
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_putmask_no_reference(using_copy_on_write, dtype):
    """With no other reference, putmask may operate in place under CoW."""
    df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)
    a_before = get_array(df, "a")
    df[df == df] = 5

    if using_copy_on_write:
        assert np.shares_memory(a_before, get_array(df, "a"))
1345
+
1346
+
1347
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_putmask_aligns_rhs_no_reference(using_copy_on_write, dtype):
    """Putmask with an aligned frame RHS stays in place when unreferenced."""
    df = DataFrame({"a": [1.5, 2], "b": 1.5}, dtype=dtype)
    a_before = get_array(df, "a")
    df[df == df] = DataFrame({"a": [5.5, 5]})

    if using_copy_on_write:
        assert np.shares_memory(a_before, get_array(df, "a"))
1355
+
1356
+
1357
@pytest.mark.parametrize("val, exp", [(5.5, True), (5, False)])
def test_putmask_dont_copy_some_blocks(using_copy_on_write, val, exp):
    """Only the blocks actually hit by the mask should be copied under CoW."""
    df = DataFrame({"a": [1, 2], "b": 1, "c": 1.5})
    view = df[:]
    df_orig = df.copy()
    mask = DataFrame(
        [[True, False, False], [True, False, False]], columns=list("abc")
    )
    df[mask] = val

    if using_copy_on_write:
        assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))
        # TODO(CoW): Could split blocks to avoid copying the whole block
        assert np.shares_memory(get_array(view, "b"), get_array(df, "b")) is exp
        assert np.shares_memory(get_array(view, "c"), get_array(df, "c"))
        assert df._mgr._has_no_reference(1) is not exp
        assert not df._mgr._has_no_reference(2)
        tm.assert_frame_equal(view, df_orig)
    elif val == 5:
        # without CoW the write goes through; val=5.5 upcasts (and thus copies)
        assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))
        assert np.shares_memory(get_array(view, "c"), get_array(df, "c"))
        assert view.iloc[0, 0] == 5
1380
+
1381
+
1382
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_where_noop(using_copy_on_write, dtype):
    """A where() that keeps every value is a lazy copy under CoW."""
    ser = Series([1, 2, 3], dtype=dtype)
    ser_orig = ser.copy()

    result = ser.where(ser > 0, 10)

    assert np.shares_memory(get_array(ser), get_array(result)) == using_copy_on_write

    # writing into the result must detach it from ser under CoW
    result.iloc[0] = 10
    if using_copy_on_write:
        assert not np.shares_memory(get_array(ser), get_array(result))
    tm.assert_series_equal(ser, ser_orig)
1398
+
1399
+
1400
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_where(using_copy_on_write, dtype):
    """A where() that replaces values always materializes a new buffer."""
    ser = Series([1, 2, 3], dtype=dtype)
    ser_orig = ser.copy()

    result = ser.where(ser < 0, 10)

    assert not np.shares_memory(get_array(ser), get_array(result))
    tm.assert_series_equal(ser, ser_orig)
1409
+
1410
+
1411
@pytest.mark.parametrize("dtype, val", [("int64", 10.5), ("Int64", 10)])
def test_where_noop_on_single_column(using_copy_on_write, dtype, val):
    """Only the column touched by where() is copied; the no-op column is shared."""
    df = DataFrame({"a": [1, 2, 3], "b": [-4, -5, -6]}, dtype=dtype)
    df_orig = df.copy()

    result = df.where(df < 0, val)

    if using_copy_on_write:
        assert np.shares_memory(get_array(df, "b"), get_array(result, "b"))
        assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
    else:
        assert not np.shares_memory(get_array(df, "b"), get_array(result, "b"))

    # writing into the shared column must detach it under CoW
    result.iloc[0, 1] = 10
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "b"), get_array(result, "b"))
    tm.assert_frame_equal(df, df_orig)
1428
+
1429
+
1430
def test_asfreq_noop(using_copy_on_write):
    """asfreq with the unchanged frequency is a lazy copy under CoW."""
    df = DataFrame(
        {"a": [0.0, None, 2.0, 3.0]},
        index=date_range("1/1/2000", periods=4, freq="T"),
    )
    df_orig = df.copy()
    result = df.asfreq(freq="T")

    shares = np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    assert shares == using_copy_on_write

    # writing into the result must never be visible in df
    result.iloc[0, 0] = 0

    assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)
1448
+
1449
+
1450
def test_iterrows(using_copy_on_write):
    """Rows yielded by iterrows() never write back into the frame under CoW."""
    df = DataFrame({"a": 0, "b": 1}, index=[1, 2, 3])
    df_orig = df.copy()

    for _, row in df.iterrows():
        row.iloc[0] = 100
    if using_copy_on_write:
        tm.assert_frame_equal(df, df_orig)
1458
+
1459
+
1460
def test_interpolate_creates_copy(using_copy_on_write):
    """Inplace ffill on a referenced frame must copy first under CoW (GH#51126)."""
    df = DataFrame({"a": [1.5, np.nan, 3]})
    view = df[:]
    expected = df.copy()

    df.ffill(inplace=True)
    df.iloc[0, 0] = 100.5

    if using_copy_on_write:
        tm.assert_frame_equal(view, expected)
    else:
        # without CoW the fill and the write are both visible through the view
        expected = DataFrame({"a": [100.5, 1.5, 3]})
        tm.assert_frame_equal(view, expected)
1474
+
1475
+
1476
def test_isetitem(using_copy_on_write):
    """isetitem replaces one column in place without copying the others."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
    df_orig = df.copy()
    df2 = df.copy(deep=None)  # Trigger a CoW
    df2.isetitem(1, np.array([-1, -2, -3]))  # This is inplace

    for col in ("c", "a"):
        shares = np.shares_memory(get_array(df, col), get_array(df2, col))
        assert shares == using_copy_on_write

    df2.loc[0, "a"] = 0
    tm.assert_frame_equal(df, df_orig)  # Original is unchanged

    shares_c = np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
    assert shares_c == using_copy_on_write
1496
+
1497
+
1498
@pytest.mark.parametrize(
    "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
def test_isetitem_series(using_copy_on_write, dtype):
    """isetitem with a Series copies the data, decoupling frame and series."""
    df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)})
    new_col = Series([7, 8, 9])
    new_col_orig = new_col.copy()
    df.isetitem(0, new_col)

    if using_copy_on_write:
        # TODO(CoW) this can share memory
        assert not np.shares_memory(get_array(df, "a"), get_array(new_col))

    # mutating the dataframe doesn't update the series
    df.loc[0, "a"] = 0
    tm.assert_series_equal(new_col, new_col_orig)

    # mutating the series doesn't update the dataframe
    df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)})
    new_col = Series([7, 8, 9])
    df.isetitem(0, new_col)

    new_col.loc[0] = 0
    expected = DataFrame({"a": [7, 8, 9], "b": np.array([4, 5, 6], dtype=dtype)})
    tm.assert_frame_equal(df, expected)
1523
+
1524
+
1525
@pytest.mark.parametrize("key", ["a", ["a"]])
def test_get(using_copy_on_write, key):
    """get() is a lazy copy under CoW; otherwise view-vs-copy depends on the key."""
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df_orig = df.copy()

    result = df.get(key)

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
        result.iloc[0] = 0
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
        tm.assert_frame_equal(df, df_orig)
    else:
        # non-CoW: a list key yields a copy (plus a chained-assignment warning),
        # while a scalar key yields a writable view on df
        warn = SettingWithCopyWarning if isinstance(key, list) else None
        with pd.option_context("chained_assignment", "warn"):
            with tm.assert_produces_warning(warn):
                result.iloc[0] = 0

        if isinstance(key, list):
            tm.assert_frame_equal(df, df_orig)
        else:
            assert df.iloc[0, 0] == 0
1549
+
1550
+
1551
@pytest.mark.parametrize("axis, key", [(0, 0), (1, "a")])
@pytest.mark.parametrize(
    "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
def test_xs(using_copy_on_write, using_array_manager, axis, key, dtype):
    # DataFrame.xs: a row from a single-block frame, or a column (axis=1),
    # shares memory with the parent; under CoW a mutation of the result must
    # leave the parent untouched.
    single_block = (dtype == "int64") and not using_array_manager
    is_view = single_block or (using_array_manager and axis == 1)
    df = DataFrame(
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
    )
    df_orig = df.copy()

    result = df.xs(key, axis=axis)

    if axis == 1 or single_block:
        assert np.shares_memory(get_array(df, "a"), get_array(result))
    elif using_copy_on_write:
        # mixed-dtype row extraction produces a fresh, untracked block
        assert result._mgr._has_no_reference(0)

    if using_copy_on_write or is_view:
        result.iloc[0] = 0
    else:
        with pd.option_context("chained_assignment", "warn"):
            with tm.assert_produces_warning(SettingWithCopyWarning):
                result.iloc[0] = 0

    if using_copy_on_write or (not single_block and axis == 0):
        tm.assert_frame_equal(df, df_orig)
    else:
        assert df.iloc[0, 0] == 0
1581
+
1582
+
1583
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("key, level", [("l1", 0), (2, 1)])
def test_xs_multiindex(using_copy_on_write, using_array_manager, key, level, axis):
    # xs on a MultiIndex level: writing to the result must never leak back
    # into the parent frame, whether or not memory was initially shared.
    arr = np.arange(18).reshape(6, 3)
    index = MultiIndex.from_product([["l1", "l2"], [1, 2, 3]], names=["lev1", "lev2"])
    df = DataFrame(arr, index=index, columns=list("abc"))
    if axis == 1:
        df = df.transpose().copy()
    df_orig = df.copy()

    result = df.xs(key, level=level, axis=axis)

    if level == 0:
        # selecting the outer level takes a contiguous slice -> shares memory
        assert np.shares_memory(
            get_array(df, df.columns[0]), get_array(result, result.columns[0])
        )

    warn = (
        SettingWithCopyWarning
        if not using_copy_on_write and not using_array_manager
        else None
    )
    with pd.option_context("chained_assignment", "warn"):
        with tm.assert_produces_warning(warn):
            result.iloc[0, 0] = 0

    tm.assert_frame_equal(df, df_orig)
1610
+
1611
+
1612
def test_update_frame(using_copy_on_write):
    # DataFrame.update mutates only the touched column; under CoW a
    # pre-existing view must keep the original values (only "b" is detached).
    df1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
    df2 = DataFrame({"b": [100.0]}, index=[1])
    df1_orig = df1.copy()
    view = df1[:]

    df1.update(df2)

    expected = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 100.0, 6.0]})
    tm.assert_frame_equal(df1, expected)
    if using_copy_on_write:
        # df1 is updated, but its view not
        tm.assert_frame_equal(view, df1_orig)
        assert np.shares_memory(get_array(df1, "a"), get_array(view, "a"))
        assert not np.shares_memory(get_array(df1, "b"), get_array(view, "b"))
    else:
        tm.assert_frame_equal(view, expected)
1629
+
1630
+
1631
def test_update_series(using_copy_on_write):
    # Series.update: under CoW an existing view is detached and keeps the
    # original values; in legacy mode the view observes the update.
    ser1 = Series([1.0, 2.0, 3.0])
    ser2 = Series([100.0], index=[1])
    ser1_orig = ser1.copy()
    view = ser1[:]

    ser1.update(ser2)

    expected = Series([1.0, 100.0, 3.0])
    tm.assert_series_equal(ser1, expected)
    if using_copy_on_write:
        # ser1 is updated, but its view not
        tm.assert_series_equal(view, ser1_orig)
    else:
        tm.assert_series_equal(view, expected)
1646
+
1647
+
1648
def test_inplace_arithmetic_series():
    # In-place arithmetic on a Series without any live references should
    # reuse the existing buffer instead of allocating a new one.
    ser = Series([1, 2, 3])
    original_values = get_array(ser)

    ser *= 2

    # Same buffer, now holding the doubled values.
    assert np.shares_memory(get_array(ser), original_values)
    tm.assert_numpy_array_equal(original_values, get_array(ser))
1654
+
1655
+
1656
def test_inplace_arithmetic_series_with_reference(using_copy_on_write):
    # With a live view of the Series, in-place arithmetic must detach under
    # CoW (view keeps old values); legacy mode mutates the shared buffer.
    ser = Series([1, 2, 3])
    ser_orig = ser.copy()
    view = ser[:]
    ser *= 2
    if using_copy_on_write:
        assert not np.shares_memory(get_array(ser), get_array(view))
        tm.assert_series_equal(ser_orig, view)
    else:
        assert np.shares_memory(get_array(ser), get_array(view))
1666
+
1667
+
1668
@pytest.mark.parametrize("copy", [True, False])
def test_transpose(using_copy_on_write, copy, using_array_manager):
    # transpose of a homogeneous frame with copy=False (or under CoW) returns
    # a lazy view; mutating the result must not touch the original under CoW.
    df = DataFrame({"a": [1, 2, 3], "b": 1})
    df_orig = df.copy()
    result = df.transpose(copy=copy)

    if not copy and not using_array_manager or using_copy_on_write:
        assert np.shares_memory(get_array(df, "a"), get_array(result, 0))
    else:
        assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))

    result.iloc[0, 0] = 100
    if using_copy_on_write:
        tm.assert_frame_equal(df, df_orig)
1682
+
1683
+
1684
def test_transpose_different_dtypes(using_copy_on_write):
    # A mixed-dtype transpose always materializes a new block, so no memory
    # is ever shared with the original frame.
    df = DataFrame({"a": [1, 2, 3], "b": 1.5})
    df_orig = df.copy()
    result = df.T

    assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
    result.iloc[0, 0] = 100
    if using_copy_on_write:
        tm.assert_frame_equal(df, df_orig)
1693
+
1694
+
1695
+ def test_transpose_ea_single_column(using_copy_on_write):
1696
+ df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
1697
+ result = df.T
1698
+
1699
+ assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
1700
+
1701
+
1702
def test_count_read_only_array():
    # DataFrame.count() may be built from a read-only intermediate; the
    # returned Series must nevertheless be writeable by the caller.
    frame = DataFrame({"a": [1, 2], "b": 3})
    counts = frame.count()
    counts.iloc[0] = 100

    expected = Series([100, 2], index=["a", "b"])
    tm.assert_series_equal(counts, expected)
1708
+
1709
+
1710
def test_series_view(using_copy_on_write):
    # Series.view shares memory with the original; under CoW the new object
    # is tracked as a reference, so writing through it triggers a copy and
    # the original stays unchanged.
    ser = Series([1, 2, 3])
    ser_orig = ser.copy()

    ser2 = ser.view()
    assert np.shares_memory(get_array(ser), get_array(ser2))
    if using_copy_on_write:
        assert not ser2._mgr._has_no_reference(0)

    ser2.iloc[0] = 100

    if using_copy_on_write:
        tm.assert_series_equal(ser_orig, ser)
    else:
        expected = Series([100, 2, 3])
        tm.assert_series_equal(ser, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ Categorical,
6
+ DataFrame,
7
+ )
8
+ import pandas._testing as tm
9
+ from pandas.tests.copy_view.util import get_array
10
+
11
+
12
@pytest.mark.parametrize(
    "replace_kwargs",
    [
        {"to_replace": {"a": 1, "b": 4}, "value": -1},
        # Test CoW splits blocks to avoid copying unchanged columns
        {"to_replace": {"a": 1}, "value": -1},
        {"to_replace": {"b": 4}, "value": -1},
        {"to_replace": {"b": {4: 1}}},
        # TODO: Add these in a further optimization
        # We would need to see which columns got replaced in the mask
        # which could be expensive
        # {"to_replace": {"b": 1}},
        # 1
    ],
)
def test_replace(using_copy_on_write, replace_kwargs):
    # Under CoW, replace keeps sharing memory for untouched columns and only
    # copies a column lazily once it is actually mutated afterwards.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": ["foo", "bar", "baz"]})
    df_orig = df.copy()

    df_replaced = df.replace(**replace_kwargs)

    if using_copy_on_write:
        if (df_replaced["b"] == df["b"]).all():
            assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b"))
        assert np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))

    # mutating squeezed df triggers a copy-on-write for that column/block
    df_replaced.loc[0, "c"] = -1
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))

    if "a" in replace_kwargs["to_replace"]:
        arr = get_array(df_replaced, "a")
        df_replaced.loc[0, "a"] = 100
        assert np.shares_memory(get_array(df_replaced, "a"), arr)
    tm.assert_frame_equal(df, df_orig)
48
+
49
+
50
+ def test_replace_regex_inplace_refs(using_copy_on_write):
51
+ df = DataFrame({"a": ["aaa", "bbb"]})
52
+ df_orig = df.copy()
53
+ view = df[:]
54
+ arr = get_array(df, "a")
55
+ df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True)
56
+ if using_copy_on_write:
57
+ assert not np.shares_memory(arr, get_array(df, "a"))
58
+ assert df._mgr._has_no_reference(0)
59
+ tm.assert_frame_equal(view, df_orig)
60
+ else:
61
+ assert np.shares_memory(arr, get_array(df, "a"))
62
+
63
+
64
+ def test_replace_regex_inplace(using_copy_on_write):
65
+ df = DataFrame({"a": ["aaa", "bbb"]})
66
+ arr = get_array(df, "a")
67
+ df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True)
68
+ if using_copy_on_write:
69
+ assert df._mgr._has_no_reference(0)
70
+ assert np.shares_memory(arr, get_array(df, "a"))
71
+
72
+ df_orig = df.copy()
73
+ df2 = df.replace(to_replace=r"^b.*$", value="new", regex=True)
74
+ tm.assert_frame_equal(df_orig, df)
75
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
76
+
77
+
78
+ def test_replace_regex_inplace_no_op(using_copy_on_write):
79
+ df = DataFrame({"a": [1, 2]})
80
+ arr = get_array(df, "a")
81
+ df.replace(to_replace=r"^a.$", value="new", inplace=True, regex=True)
82
+ if using_copy_on_write:
83
+ assert df._mgr._has_no_reference(0)
84
+ assert np.shares_memory(arr, get_array(df, "a"))
85
+
86
+ df_orig = df.copy()
87
+ df2 = df.replace(to_replace=r"^x.$", value="new", regex=True)
88
+ tm.assert_frame_equal(df_orig, df)
89
+ if using_copy_on_write:
90
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
91
+ else:
92
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
93
+
94
+
95
+ def test_replace_mask_all_false_second_block(using_copy_on_write):
96
+ df = DataFrame({"a": [1.5, 2, 3], "b": 100.5, "c": 1, "d": 2})
97
+ df_orig = df.copy()
98
+
99
+ df2 = df.replace(to_replace=1.5, value=55.5)
100
+
101
+ if using_copy_on_write:
102
+ # TODO: Block splitting would allow us to avoid copying b
103
+ assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
104
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
105
+
106
+ else:
107
+ assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
108
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
109
+
110
+ df2.loc[0, "c"] = 1
111
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
112
+
113
+ if using_copy_on_write:
114
+ assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
115
+ # TODO: This should split and not copy the whole block
116
+ # assert np.shares_memory(get_array(df, "d"), get_array(df2, "d"))
117
+
118
+
119
+ def test_replace_coerce_single_column(using_copy_on_write, using_array_manager):
120
+ df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})
121
+ df_orig = df.copy()
122
+
123
+ df2 = df.replace(to_replace=1.5, value="a")
124
+
125
+ if using_copy_on_write:
126
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
127
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
128
+
129
+ elif not using_array_manager:
130
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
131
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
132
+
133
+ if using_copy_on_write:
134
+ df2.loc[0, "b"] = 0.5
135
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
136
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
137
+
138
+
139
+ def test_replace_to_replace_wrong_dtype(using_copy_on_write):
140
+ df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})
141
+ df_orig = df.copy()
142
+
143
+ df2 = df.replace(to_replace="xxx", value=1.5)
144
+
145
+ if using_copy_on_write:
146
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
147
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
148
+
149
+ else:
150
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
151
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
152
+
153
+ df2.loc[0, "b"] = 0.5
154
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
155
+
156
+ if using_copy_on_write:
157
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
158
+
159
+
160
+ def test_replace_list_categorical(using_copy_on_write):
161
+ df = DataFrame({"a": ["a", "b", "c"]}, dtype="category")
162
+ arr = get_array(df, "a")
163
+ df.replace(["c"], value="a", inplace=True)
164
+ assert np.shares_memory(arr.codes, get_array(df, "a").codes)
165
+ if using_copy_on_write:
166
+ assert df._mgr._has_no_reference(0)
167
+
168
+ df_orig = df.copy()
169
+ df2 = df.replace(["b"], value="a")
170
+ assert not np.shares_memory(arr.codes, get_array(df2, "a").codes)
171
+
172
+ tm.assert_frame_equal(df, df_orig)
173
+
174
+
175
+ def test_replace_list_inplace_refs_categorical(using_copy_on_write):
176
+ df = DataFrame({"a": ["a", "b", "c"]}, dtype="category")
177
+ view = df[:]
178
+ df_orig = df.copy()
179
+ df.replace(["c"], value="a", inplace=True)
180
+ if using_copy_on_write:
181
+ assert not np.shares_memory(
182
+ get_array(view, "a").codes, get_array(df, "a").codes
183
+ )
184
+ tm.assert_frame_equal(df_orig, view)
185
+ else:
186
+ # This could be inplace
187
+ assert not np.shares_memory(
188
+ get_array(view, "a").codes, get_array(df, "a").codes
189
+ )
190
+
191
+
192
+ @pytest.mark.parametrize("to_replace", [1.5, [1.5], []])
193
+ def test_replace_inplace(using_copy_on_write, to_replace):
194
+ df = DataFrame({"a": [1.5, 2, 3]})
195
+ arr_a = get_array(df, "a")
196
+ df.replace(to_replace=1.5, value=15.5, inplace=True)
197
+
198
+ assert np.shares_memory(get_array(df, "a"), arr_a)
199
+ if using_copy_on_write:
200
+ assert df._mgr._has_no_reference(0)
201
+
202
+
203
+ @pytest.mark.parametrize("to_replace", [1.5, [1.5]])
204
+ def test_replace_inplace_reference(using_copy_on_write, to_replace):
205
+ df = DataFrame({"a": [1.5, 2, 3]})
206
+ arr_a = get_array(df, "a")
207
+ view = df[:]
208
+ df.replace(to_replace=to_replace, value=15.5, inplace=True)
209
+
210
+ if using_copy_on_write:
211
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
212
+ assert df._mgr._has_no_reference(0)
213
+ assert view._mgr._has_no_reference(0)
214
+ else:
215
+ assert np.shares_memory(get_array(df, "a"), arr_a)
216
+
217
+
218
+ @pytest.mark.parametrize("to_replace", ["a", 100.5])
219
+ def test_replace_inplace_reference_no_op(using_copy_on_write, to_replace):
220
+ df = DataFrame({"a": [1.5, 2, 3]})
221
+ arr_a = get_array(df, "a")
222
+ view = df[:]
223
+ df.replace(to_replace=to_replace, value=15.5, inplace=True)
224
+
225
+ assert np.shares_memory(get_array(df, "a"), arr_a)
226
+ if using_copy_on_write:
227
+ assert not df._mgr._has_no_reference(0)
228
+ assert not view._mgr._has_no_reference(0)
229
+
230
+
231
+ @pytest.mark.parametrize("to_replace", [1, [1]])
232
+ @pytest.mark.parametrize("val", [1, 1.5])
233
+ def test_replace_categorical_inplace_reference(using_copy_on_write, val, to_replace):
234
+ df = DataFrame({"a": Categorical([1, 2, 3])})
235
+ df_orig = df.copy()
236
+ arr_a = get_array(df, "a")
237
+ view = df[:]
238
+ df.replace(to_replace=to_replace, value=val, inplace=True)
239
+
240
+ if using_copy_on_write:
241
+ assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes)
242
+ assert df._mgr._has_no_reference(0)
243
+ assert view._mgr._has_no_reference(0)
244
+ tm.assert_frame_equal(view, df_orig)
245
+ else:
246
+ assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
247
+
248
+
249
+ @pytest.mark.parametrize("val", [1, 1.5])
250
+ def test_replace_categorical_inplace(using_copy_on_write, val):
251
+ df = DataFrame({"a": Categorical([1, 2, 3])})
252
+ arr_a = get_array(df, "a")
253
+ df.replace(to_replace=1, value=val, inplace=True)
254
+
255
+ assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
256
+ if using_copy_on_write:
257
+ assert df._mgr._has_no_reference(0)
258
+
259
+ expected = DataFrame({"a": Categorical([val, 2, 3])})
260
+ tm.assert_frame_equal(df, expected)
261
+
262
+
263
+ @pytest.mark.parametrize("val", [1, 1.5])
264
+ def test_replace_categorical(using_copy_on_write, val):
265
+ df = DataFrame({"a": Categorical([1, 2, 3])})
266
+ df_orig = df.copy()
267
+ df2 = df.replace(to_replace=1, value=val)
268
+
269
+ if using_copy_on_write:
270
+ assert df._mgr._has_no_reference(0)
271
+ assert df2._mgr._has_no_reference(0)
272
+ assert not np.shares_memory(get_array(df, "a").codes, get_array(df2, "a").codes)
273
+ tm.assert_frame_equal(df, df_orig)
274
+
275
+ arr_a = get_array(df2, "a").codes
276
+ df2.iloc[0, 0] = 2.0
277
+ assert np.shares_memory(get_array(df2, "a").codes, arr_a)
278
+
279
+
280
+ @pytest.mark.parametrize("method", ["where", "mask"])
281
+ def test_masking_inplace(using_copy_on_write, method):
282
+ df = DataFrame({"a": [1.5, 2, 3]})
283
+ df_orig = df.copy()
284
+ arr_a = get_array(df, "a")
285
+ view = df[:]
286
+
287
+ method = getattr(df, method)
288
+ method(df["a"] > 1.6, -1, inplace=True)
289
+
290
+ if using_copy_on_write:
291
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
292
+ assert df._mgr._has_no_reference(0)
293
+ assert view._mgr._has_no_reference(0)
294
+ tm.assert_frame_equal(view, df_orig)
295
+ else:
296
+ assert np.shares_memory(get_array(df, "a"), arr_a)
297
+
298
+
299
+ def test_replace_empty_list(using_copy_on_write):
300
+ df = DataFrame({"a": [1, 2]})
301
+
302
+ df2 = df.replace([], [])
303
+ if using_copy_on_write:
304
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
305
+ assert not df._mgr._has_no_reference(0)
306
+ else:
307
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
308
+
309
+ arr_a = get_array(df, "a")
310
+ df.replace([], [])
311
+ if using_copy_on_write:
312
+ assert np.shares_memory(get_array(df, "a"), arr_a)
313
+ assert not df._mgr._has_no_reference(0)
314
+ assert not df2._mgr._has_no_reference(0)
315
+
316
+
317
+ @pytest.mark.parametrize("value", ["d", None])
318
+ def test_replace_object_list_inplace(using_copy_on_write, value):
319
+ df = DataFrame({"a": ["a", "b", "c"]})
320
+ arr = get_array(df, "a")
321
+ df.replace(["c"], value, inplace=True)
322
+ if using_copy_on_write or value is None:
323
+ assert np.shares_memory(arr, get_array(df, "a"))
324
+ else:
325
+ # This could be inplace
326
+ assert not np.shares_memory(arr, get_array(df, "a"))
327
+ if using_copy_on_write:
328
+ assert df._mgr._has_no_reference(0)
329
+
330
+
331
+ def test_replace_list_multiple_elements_inplace(using_copy_on_write):
332
+ df = DataFrame({"a": [1, 2, 3]})
333
+ arr = get_array(df, "a")
334
+ df.replace([1, 2], 4, inplace=True)
335
+ if using_copy_on_write:
336
+ # TODO(CoW): This should share memory
337
+ assert not np.shares_memory(arr, get_array(df, "a"))
338
+ assert df._mgr._has_no_reference(0)
339
+ else:
340
+ assert np.shares_memory(arr, get_array(df, "a"))
341
+
342
+
343
+ def test_replace_list_none(using_copy_on_write):
344
+ df = DataFrame({"a": ["a", "b", "c"]})
345
+
346
+ df_orig = df.copy()
347
+ df2 = df.replace(["b"], value=None)
348
+ tm.assert_frame_equal(df, df_orig)
349
+
350
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
351
+
352
+
353
+ def test_replace_list_none_inplace_refs(using_copy_on_write):
354
+ df = DataFrame({"a": ["a", "b", "c"]})
355
+ arr = get_array(df, "a")
356
+ df_orig = df.copy()
357
+ view = df[:]
358
+ df.replace(["a"], value=None, inplace=True)
359
+ if using_copy_on_write:
360
+ assert df._mgr._has_no_reference(0)
361
+ assert not np.shares_memory(arr, get_array(df, "a"))
362
+ tm.assert_frame_equal(df_orig, view)
363
+ else:
364
+ assert np.shares_memory(arr, get_array(df, "a"))
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas import DataFrame
4
+ from pandas.tests.copy_view.util import get_array
5
+
6
+
7
def test_get_array_numpy():
    # Repeated lookups of the same numpy-backed column must resolve to the
    # same underlying buffer.
    frame = DataFrame({"a": [1, 2, 3]})
    first = get_array(frame, "a")
    second = get_array(frame, "a")
    assert np.shares_memory(first, second)
10
+
11
+
12
def test_get_array_masked():
    # For a nullable (masked) column, get_array unwraps to the raw data
    # buffer, so repeated lookups still share memory.
    frame = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
    assert np.shares_memory(get_array(frame, "a"), get_array(frame, "a"))
videochat2/lib/python3.10/site-packages/pandas/tests/copy_view/util.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas import (
2
+ Categorical,
3
+ Index,
4
+ Series,
5
+ )
6
+ from pandas.core.arrays import BaseMaskedArray
7
+
8
+
9
def get_array(obj, col=None):
    """
    Return the backing array of a DataFrame column, a Series, or an Index.

    Equivalent of ``df[col].values``, but without going through normal
    getitem, which would itself register references / trigger CoW tracking
    (often the very behavior under test).
    """
    # An Index, or a Series addressed by its own name (or with no column
    # requested), exposes its backing array directly.
    is_own_array = isinstance(obj, Index) or (
        isinstance(obj, Series) and (col is None or obj.name == col)
    )
    if is_own_array:
        arr = obj._values
    else:
        assert col is not None
        icol = obj.columns.get_loc(col)
        assert isinstance(icol, int)
        arr = obj._get_column_array(icol)
    # Unwrap to the lowest-level buffer so np.shares_memory comparisons work.
    if isinstance(arr, BaseMaskedArray):
        return arr._data
    if isinstance(arr, Categorical):
        return arr
    return getattr(arr, "_ndarray", arr)
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (550 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc ADDED
Binary file (8.68 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc ADDED
Binary file (4.92 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (1.03 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/conftest.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import pandas as pd
4
+
5
+
6
@pytest.fixture
def df_from_dict():
    # Fixture factory: builds a DataFrame from a dict of columns, optionally
    # casting every column to categorical dtype. Used by the vendored
    # interchange-protocol spec-conformance tests.
    def maker(dct, is_categorical=False):
        df = pd.DataFrame(dct)
        return df.astype("category") if is_categorical else df

    return maker
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/test_impl.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import random
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas._libs.tslibs import iNaT
8
+ import pandas.util._test_decorators as td
9
+
10
+ import pandas as pd
11
+ import pandas._testing as tm
12
+ from pandas.core.interchange.column import PandasColumn
13
+ from pandas.core.interchange.dataframe_protocol import (
14
+ ColumnNullType,
15
+ DtypeKind,
16
+ )
17
+ from pandas.core.interchange.from_dataframe import from_dataframe
18
+
19
+ test_data_categorical = {
20
+ "ordered": pd.Categorical(list("testdata") * 30, ordered=True),
21
+ "unordered": pd.Categorical(list("testdata") * 30, ordered=False),
22
+ }
23
+
24
+ NCOLS, NROWS = 100, 200
25
+
26
+
27
def _make_data(make_one):
    # Build NCOLS columns of NROWS generated values each. Column labels are
    # "col1".."colNCOLS", but enumeration starts from the middle of the range
    # and wraps around (same labels and order as the original expression).
    columns = {}
    for i in range(NCOLS):
        label = f"col{int((i - NCOLS / 2) % NCOLS + 1)}"
        columns[label] = [make_one() for _ in range(NROWS)]
    return columns
32
+
33
+
34
+ int_data = _make_data(lambda: random.randint(-100, 100))
35
+ uint_data = _make_data(lambda: random.randint(1, 100))
36
+ bool_data = _make_data(lambda: random.choice([True, False]))
37
+ float_data = _make_data(lambda: random.random())
38
+ datetime_data = _make_data(
39
+ lambda: datetime(
40
+ year=random.randint(1900, 2100),
41
+ month=random.randint(1, 12),
42
+ day=random.randint(1, 20),
43
+ )
44
+ )
45
+
46
+ string_data = {
47
+ "separator data": [
48
+ "abC|DeF,Hik",
49
+ "234,3245.67",
50
+ "gSaf,qWer|Gre",
51
+ "asd3,4sad|",
52
+ np.NaN,
53
+ ]
54
+ }
55
+
56
+
57
+ @pytest.mark.parametrize("data", [("ordered", True), ("unordered", False)])
58
+ def test_categorical_dtype(data):
59
+ df = pd.DataFrame({"A": (test_data_categorical[data[0]])})
60
+
61
+ col = df.__dataframe__().get_column_by_name("A")
62
+ assert col.dtype[0] == DtypeKind.CATEGORICAL
63
+ assert col.null_count == 0
64
+ assert col.describe_null == (ColumnNullType.USE_SENTINEL, -1)
65
+ assert col.num_chunks() == 1
66
+ desc_cat = col.describe_categorical
67
+ assert desc_cat["is_ordered"] == data[1]
68
+ assert desc_cat["is_dictionary"] is True
69
+ assert isinstance(desc_cat["categories"], PandasColumn)
70
+ tm.assert_series_equal(
71
+ desc_cat["categories"]._col, pd.Series(["a", "d", "e", "s", "t"])
72
+ )
73
+
74
+ tm.assert_frame_equal(df, from_dataframe(df.__dataframe__()))
75
+
76
+
77
+ def test_categorical_pyarrow():
78
+ # GH 49889
79
+ pa = pytest.importorskip("pyarrow", "11.0.0")
80
+
81
+ arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]
82
+ table = pa.table({"weekday": pa.array(arr).dictionary_encode()})
83
+ exchange_df = table.__dataframe__()
84
+ result = from_dataframe(exchange_df)
85
+ weekday = pd.Categorical(
86
+ arr, categories=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
87
+ )
88
+ expected = pd.DataFrame({"weekday": weekday})
89
+ tm.assert_frame_equal(result, expected)
90
+
91
+
92
+ def test_empty_categorical_pyarrow():
93
+ # https://github.com/pandas-dev/pandas/issues/53077
94
+ pa = pytest.importorskip("pyarrow", "11.0.0")
95
+
96
+ arr = [None]
97
+ table = pa.table({"arr": pa.array(arr, "float64").dictionary_encode()})
98
+ exchange_df = table.__dataframe__()
99
+ result = pd.api.interchange.from_dataframe(exchange_df)
100
+ expected = pd.DataFrame({"arr": pd.Categorical([np.nan])})
101
+ tm.assert_frame_equal(result, expected)
102
+
103
+
104
+ def test_large_string_pyarrow():
105
+ # GH 52795
106
+ pa = pytest.importorskip("pyarrow", "11.0.0")
107
+
108
+ arr = ["Mon", "Tue"]
109
+ table = pa.table({"weekday": pa.array(arr, "large_string")})
110
+ exchange_df = table.__dataframe__()
111
+ result = from_dataframe(exchange_df)
112
+ expected = pd.DataFrame({"weekday": ["Mon", "Tue"]})
113
+ tm.assert_frame_equal(result, expected)
114
+
115
+ # check round-trip
116
+ assert pa.Table.equals(pa.interchange.from_dataframe(result), table)
117
+
118
+
119
+ @pytest.mark.parametrize(
120
+ ("offset", "length", "expected_values"),
121
+ [
122
+ (0, None, [3.3, float("nan"), 2.1]),
123
+ (1, None, [float("nan"), 2.1]),
124
+ (2, None, [2.1]),
125
+ (0, 2, [3.3, float("nan")]),
126
+ (0, 1, [3.3]),
127
+ (1, 1, [float("nan")]),
128
+ ],
129
+ )
130
+ def test_bitmasks_pyarrow(offset, length, expected_values):
131
+ # GH 52795
132
+ pa = pytest.importorskip("pyarrow", "11.0.0")
133
+
134
+ arr = [3.3, None, 2.1]
135
+ table = pa.table({"arr": arr}).slice(offset, length)
136
+ exchange_df = table.__dataframe__()
137
+ result = from_dataframe(exchange_df)
138
+ expected = pd.DataFrame({"arr": expected_values})
139
+ tm.assert_frame_equal(result, expected)
140
+
141
+ # check round-trip
142
+ assert pa.Table.equals(pa.interchange.from_dataframe(result), table)
143
+
144
+
145
+ @pytest.mark.parametrize(
146
+ "data", [int_data, uint_data, float_data, bool_data, datetime_data]
147
+ )
148
+ def test_dataframe(data):
149
+ df = pd.DataFrame(data)
150
+
151
+ df2 = df.__dataframe__()
152
+
153
+ assert df2.num_columns() == NCOLS
154
+ assert df2.num_rows() == NROWS
155
+
156
+ assert list(df2.column_names()) == list(data.keys())
157
+
158
+ indices = (0, 2)
159
+ names = tuple(list(data.keys())[idx] for idx in indices)
160
+
161
+ result = from_dataframe(df2.select_columns(indices))
162
+ expected = from_dataframe(df2.select_columns_by_name(names))
163
+ tm.assert_frame_equal(result, expected)
164
+
165
+ assert isinstance(result.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list)
166
+ assert isinstance(expected.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list)
167
+
168
+
169
+ def test_missing_from_masked():
170
+ df = pd.DataFrame(
171
+ {
172
+ "x": np.array([1, 2, 3, 4, 0]),
173
+ "y": np.array([1.5, 2.5, 3.5, 4.5, 0]),
174
+ "z": np.array([True, False, True, True, True]),
175
+ }
176
+ )
177
+
178
+ df2 = df.__dataframe__()
179
+
180
+ rng = np.random.RandomState(42)
181
+ dict_null = {col: rng.randint(low=0, high=len(df)) for col in df.columns}
182
+ for col, num_nulls in dict_null.items():
183
+ null_idx = df.index[
184
+ rng.choice(np.arange(len(df)), size=num_nulls, replace=False)
185
+ ]
186
+ df.loc[null_idx, col] = None
187
+
188
+ df2 = df.__dataframe__()
189
+
190
+ assert df2.get_column_by_name("x").null_count == dict_null["x"]
191
+ assert df2.get_column_by_name("y").null_count == dict_null["y"]
192
+ assert df2.get_column_by_name("z").null_count == dict_null["z"]
193
+
194
+
195
+ @pytest.mark.parametrize(
196
+ "data",
197
+ [
198
+ {"x": [1.5, 2.5, 3.5], "y": [9.2, 10.5, 11.8]},
199
+ {"x": [1, 2, 0], "y": [9.2, 10.5, 11.8]},
200
+ {
201
+ "x": np.array([True, True, False]),
202
+ "y": np.array([1, 2, 0]),
203
+ "z": np.array([9.2, 10.5, 11.8]),
204
+ },
205
+ ],
206
+ )
207
+ def test_mixed_data(data):
208
+ df = pd.DataFrame(data)
209
+ df2 = df.__dataframe__()
210
+
211
+ for col_name in df.columns:
212
+ assert df2.get_column_by_name(col_name).null_count == 0
213
+
214
+
215
+ def test_mixed_missing():
216
+ df = pd.DataFrame(
217
+ {
218
+ "x": np.array([True, None, False, None, True]),
219
+ "y": np.array([None, 2, None, 1, 2]),
220
+ "z": np.array([9.2, 10.5, None, 11.8, None]),
221
+ }
222
+ )
223
+
224
+ df2 = df.__dataframe__()
225
+
226
+ for col_name in df.columns:
227
+ assert df2.get_column_by_name(col_name).null_count == 2
228
+
229
+
230
+ def test_string():
231
+ test_str_data = string_data["separator data"] + [""]
232
+ df = pd.DataFrame({"A": test_str_data})
233
+ col = df.__dataframe__().get_column_by_name("A")
234
+
235
+ assert col.size() == 6
236
+ assert col.null_count == 1
237
+ assert col.dtype[0] == DtypeKind.STRING
238
+ assert col.describe_null == (ColumnNullType.USE_BYTEMASK, 0)
239
+
240
+ df_sliced = df[1:]
241
+ col = df_sliced.__dataframe__().get_column_by_name("A")
242
+ assert col.size() == 5
243
+ assert col.null_count == 1
244
+ assert col.dtype[0] == DtypeKind.STRING
245
+ assert col.describe_null == (ColumnNullType.USE_BYTEMASK, 0)
246
+
247
+
248
+ def test_nonstring_object():
249
+ df = pd.DataFrame({"A": ["a", 10, 1.0, ()]})
250
+ col = df.__dataframe__().get_column_by_name("A")
251
+ with pytest.raises(NotImplementedError, match="not supported yet"):
252
+ col.dtype
253
+
254
+
255
+ def test_datetime():
256
+ df = pd.DataFrame({"A": [pd.Timestamp("2022-01-01"), pd.NaT]})
257
+ col = df.__dataframe__().get_column_by_name("A")
258
+
259
+ assert col.size() == 2
260
+ assert col.null_count == 1
261
+ assert col.dtype[0] == DtypeKind.DATETIME
262
+ assert col.describe_null == (ColumnNullType.USE_SENTINEL, iNaT)
263
+
264
+ tm.assert_frame_equal(df, from_dataframe(df.__dataframe__()))
265
+
266
+
267
+ @td.skip_if_np_lt("1.23")
268
+ def test_categorical_to_numpy_dlpack():
269
+ # https://github.com/pandas-dev/pandas/issues/48393
270
+ df = pd.DataFrame({"A": pd.Categorical(["a", "b", "a"])})
271
+ col = df.__dataframe__().get_column_by_name("A")
272
+ result = np.from_dlpack(col.get_buffers()["data"][0])
273
+ expected = np.array([0, 1, 0], dtype="int8")
274
+ tm.assert_numpy_array_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/test_spec_conformance.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A verbatim copy (vendored) of the spec tests.
3
+ Taken from https://github.com/data-apis/dataframe-api
4
+ """
5
+ import ctypes
6
+ import math
7
+
8
+ import pytest
9
+
10
+
11
+ @pytest.mark.parametrize(
12
+ "test_data",
13
+ [
14
+ {"a": ["foo", "bar"], "b": ["baz", "qux"]},
15
+ {"a": [1.5, 2.5, 3.5], "b": [9.2, 10.5, 11.8]},
16
+ {"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]},
17
+ ],
18
+ ids=["str_data", "float_data", "int_data"],
19
+ )
20
+ def test_only_one_dtype(test_data, df_from_dict):
21
+ columns = list(test_data.keys())
22
+ df = df_from_dict(test_data)
23
+ dfX = df.__dataframe__()
24
+
25
+ column_size = len(test_data[columns[0]])
26
+ for column in columns:
27
+ null_count = dfX.get_column_by_name(column).null_count
28
+ assert null_count == 0
29
+ assert isinstance(null_count, int)
30
+ assert dfX.get_column_by_name(column).size() == column_size
31
+ assert dfX.get_column_by_name(column).offset == 0
32
+
33
+
34
+ def test_mixed_dtypes(df_from_dict):
35
+ df = df_from_dict(
36
+ {
37
+ "a": [1, 2, 3], # dtype kind INT = 0
38
+ "b": [3, 4, 5], # dtype kind INT = 0
39
+ "c": [1.5, 2.5, 3.5], # dtype kind FLOAT = 2
40
+ "d": [9, 10, 11], # dtype kind INT = 0
41
+ "e": [True, False, True], # dtype kind BOOLEAN = 20
42
+ "f": ["a", "", "c"], # dtype kind STRING = 21
43
+ }
44
+ )
45
+ dfX = df.__dataframe__()
46
+ # for meanings of dtype[0] see the spec; we cannot import the spec here as this
47
+ # file is expected to be vendored *anywhere*;
48
+ # values for dtype[0] are explained above
49
+ columns = {"a": 0, "b": 0, "c": 2, "d": 0, "e": 20, "f": 21}
50
+
51
+ for column, kind in columns.items():
52
+ colX = dfX.get_column_by_name(column)
53
+ assert colX.null_count == 0
54
+ assert isinstance(colX.null_count, int)
55
+ assert colX.size() == 3
56
+ assert colX.offset == 0
57
+
58
+ assert colX.dtype[0] == kind
59
+
60
+ assert dfX.get_column_by_name("c").dtype[1] == 64
61
+
62
+
63
+ def test_na_float(df_from_dict):
64
+ df = df_from_dict({"a": [1.0, math.nan, 2.0]})
65
+ dfX = df.__dataframe__()
66
+ colX = dfX.get_column_by_name("a")
67
+ assert colX.null_count == 1
68
+ assert isinstance(colX.null_count, int)
69
+
70
+
71
+ def test_noncategorical(df_from_dict):
72
+ df = df_from_dict({"a": [1, 2, 3]})
73
+ dfX = df.__dataframe__()
74
+ colX = dfX.get_column_by_name("a")
75
+ with pytest.raises(TypeError, match=".*categorical.*"):
76
+ colX.describe_categorical
77
+
78
+
79
+ def test_categorical(df_from_dict):
80
+ df = df_from_dict(
81
+ {"weekday": ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]},
82
+ is_categorical=True,
83
+ )
84
+
85
+ colX = df.__dataframe__().get_column_by_name("weekday")
86
+ categorical = colX.describe_categorical
87
+ assert isinstance(categorical["is_ordered"], bool)
88
+ assert isinstance(categorical["is_dictionary"], bool)
89
+
90
+
91
+ def test_dataframe(df_from_dict):
92
+ df = df_from_dict(
93
+ {"x": [True, True, False], "y": [1, 2, 0], "z": [9.2, 10.5, 11.8]}
94
+ )
95
+ dfX = df.__dataframe__()
96
+
97
+ assert dfX.num_columns() == 3
98
+ assert dfX.num_rows() == 3
99
+ assert dfX.num_chunks() == 1
100
+ assert list(dfX.column_names()) == ["x", "y", "z"]
101
+ assert list(dfX.select_columns((0, 2)).column_names()) == list(
102
+ dfX.select_columns_by_name(("x", "z")).column_names()
103
+ )
104
+
105
+
106
+ @pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)])
107
+ def test_df_get_chunks(size, n_chunks, df_from_dict):
108
+ df = df_from_dict({"x": list(range(size))})
109
+ dfX = df.__dataframe__()
110
+ chunks = list(dfX.get_chunks(n_chunks))
111
+ assert len(chunks) == n_chunks
112
+ assert sum(chunk.num_rows() for chunk in chunks) == size
113
+
114
+
115
+ @pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)])
116
+ def test_column_get_chunks(size, n_chunks, df_from_dict):
117
+ df = df_from_dict({"x": list(range(size))})
118
+ dfX = df.__dataframe__()
119
+ chunks = list(dfX.get_column(0).get_chunks(n_chunks))
120
+ assert len(chunks) == n_chunks
121
+ assert sum(chunk.size() for chunk in chunks) == size
122
+
123
+
124
+ def test_get_columns(df_from_dict):
125
+ df = df_from_dict({"a": [0, 1], "b": [2.5, 3.5]})
126
+ dfX = df.__dataframe__()
127
+ for colX in dfX.get_columns():
128
+ assert colX.size() == 2
129
+ assert colX.num_chunks() == 1
130
+ # for meanings of dtype[0] see the spec; we cannot import the spec here as this
131
+ # file is expected to be vendored *anywhere*
132
+ assert dfX.get_column(0).dtype[0] == 0 # INT
133
+ assert dfX.get_column(1).dtype[0] == 2 # FLOAT
134
+
135
+
136
+ def test_buffer(df_from_dict):
137
+ arr = [0, 1, -1]
138
+ df = df_from_dict({"a": arr})
139
+ dfX = df.__dataframe__()
140
+ colX = dfX.get_column(0)
141
+ bufX = colX.get_buffers()
142
+
143
+ dataBuf, dataDtype = bufX["data"]
144
+
145
+ assert dataBuf.bufsize > 0
146
+ assert dataBuf.ptr != 0
147
+ device, _ = dataBuf.__dlpack_device__()
148
+
149
+ # for meanings of dtype[0] see the spec; we cannot import the spec here as this
150
+ # file is expected to be vendored *anywhere*
151
+ assert dataDtype[0] == 0 # INT
152
+
153
+ if device == 1: # CPU-only as we're going to directly read memory here
154
+ bitwidth = dataDtype[1]
155
+ ctype = {
156
+ 8: ctypes.c_int8,
157
+ 16: ctypes.c_int16,
158
+ 32: ctypes.c_int32,
159
+ 64: ctypes.c_int64,
160
+ }[bitwidth]
161
+
162
+ for idx, truth in enumerate(arr):
163
+ val = ctype.from_address(dataBuf.ptr + idx * (bitwidth // 8)).value
164
+ assert val == truth, f"Buffer at index {idx} mismatch"
videochat2/lib/python3.10/site-packages/pandas/tests/interchange/test_utils.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
6
+
7
+ # TODO: use ArrowSchema to get reference C-string.
8
+ # At the time, there is no way to access ArrowSchema holding a type format string
9
+ # from python. The only way to access it is to export the structure to a C-pointer,
10
+ # see DataType._export_to_c() method defined in
11
+ # https://github.com/apache/arrow/blob/master/python/pyarrow/types.pxi
12
+
13
+
14
@pytest.mark.parametrize(
    "pandas_dtype, c_string",
    [
        (np.dtype("bool"), "b"),
        (np.dtype("int8"), "c"),
        (np.dtype("uint8"), "C"),
        (np.dtype("int16"), "s"),
        (np.dtype("uint16"), "S"),
        (np.dtype("int32"), "i"),
        (np.dtype("uint32"), "I"),
        (np.dtype("int64"), "l"),
        (np.dtype("uint64"), "L"),
        (np.dtype("float16"), "e"),
        (np.dtype("float32"), "f"),
        (np.dtype("float64"), "g"),
        (pd.Series(["a"]).dtype, "u"),
        (
            pd.Series([0]).astype("datetime64[ns]").dtype,
            "tsn:",
        ),
        (pd.CategoricalDtype(["a"]), "l"),
        (np.dtype("O"), "u"),
    ],
)
def test_dtype_to_arrow_c_fmt(pandas_dtype, c_string):  # PR01
    """Each pandas/numpy dtype maps to its Arrow C-format string."""
    assert dtype_to_arrow_c_fmt(pandas_dtype) == c_string
videochat2/lib/python3.10/site-packages/pandas/tests/test_algos.py ADDED
@@ -0,0 +1,2407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from itertools import permutations
3
+ import struct
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas._libs import (
9
+ algos as libalgos,
10
+ hashtable as ht,
11
+ )
12
+ import pandas.util._test_decorators as td
13
+
14
+ from pandas.core.dtypes.common import (
15
+ is_bool_dtype,
16
+ is_complex_dtype,
17
+ is_float_dtype,
18
+ is_integer_dtype,
19
+ is_object_dtype,
20
+ )
21
+ from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
22
+
23
+ import pandas as pd
24
+ from pandas import (
25
+ Categorical,
26
+ CategoricalIndex,
27
+ DataFrame,
28
+ DatetimeIndex,
29
+ Index,
30
+ IntervalIndex,
31
+ MultiIndex,
32
+ NaT,
33
+ Period,
34
+ PeriodIndex,
35
+ Series,
36
+ Timedelta,
37
+ Timestamp,
38
+ date_range,
39
+ timedelta_range,
40
+ to_datetime,
41
+ to_timedelta,
42
+ )
43
+ import pandas._testing as tm
44
+ import pandas.core.algorithms as algos
45
+ from pandas.core.arrays import (
46
+ DatetimeArray,
47
+ TimedeltaArray,
48
+ )
49
+ import pandas.core.common as com
50
+
51
+
52
+ class TestFactorize:
53
+ @pytest.mark.parametrize("sort", [True, False])
54
+ def test_factorize(self, index_or_series_obj, sort):
55
+ obj = index_or_series_obj
56
+ result_codes, result_uniques = obj.factorize(sort=sort)
57
+
58
+ constructor = Index
59
+ if isinstance(obj, MultiIndex):
60
+ constructor = MultiIndex.from_tuples
61
+ expected_arr = obj.unique()
62
+ if expected_arr.dtype == np.float16:
63
+ expected_arr = expected_arr.astype(np.float32)
64
+ expected_uniques = constructor(expected_arr)
65
+ if (
66
+ isinstance(obj, Index)
67
+ and expected_uniques.dtype == bool
68
+ and obj.dtype == object
69
+ ):
70
+ expected_uniques = expected_uniques.astype(object)
71
+
72
+ if sort:
73
+ expected_uniques = expected_uniques.sort_values()
74
+
75
+ # construct an integer ndarray so that
76
+ # `expected_uniques.take(expected_codes)` is equal to `obj`
77
+ expected_uniques_list = list(expected_uniques)
78
+ expected_codes = [expected_uniques_list.index(val) for val in obj]
79
+ expected_codes = np.asarray(expected_codes, dtype=np.intp)
80
+
81
+ tm.assert_numpy_array_equal(result_codes, expected_codes)
82
+ tm.assert_index_equal(result_uniques, expected_uniques, exact=True)
83
+
84
+ def test_series_factorize_use_na_sentinel_false(self):
85
+ # GH#35667
86
+ values = np.array([1, 2, 1, np.nan])
87
+ ser = Series(values)
88
+ codes, uniques = ser.factorize(use_na_sentinel=False)
89
+
90
+ expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)
91
+ expected_uniques = Index([1.0, 2.0, np.nan])
92
+
93
+ tm.assert_numpy_array_equal(codes, expected_codes)
94
+ tm.assert_index_equal(uniques, expected_uniques)
95
+
96
+ def test_basic(self):
97
+ codes, uniques = algos.factorize(["a", "b", "b", "a", "a", "c", "c", "c"])
98
+ tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object))
99
+
100
+ codes, uniques = algos.factorize(
101
+ ["a", "b", "b", "a", "a", "c", "c", "c"], sort=True
102
+ )
103
+ exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
104
+ tm.assert_numpy_array_equal(codes, exp)
105
+ exp = np.array(["a", "b", "c"], dtype=object)
106
+ tm.assert_numpy_array_equal(uniques, exp)
107
+
108
+ arr = np.arange(5, dtype=np.intp)[::-1]
109
+
110
+ codes, uniques = algos.factorize(arr)
111
+ exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
112
+ tm.assert_numpy_array_equal(codes, exp)
113
+ exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype)
114
+ tm.assert_numpy_array_equal(uniques, exp)
115
+
116
+ codes, uniques = algos.factorize(arr, sort=True)
117
+ exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
118
+ tm.assert_numpy_array_equal(codes, exp)
119
+ exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype)
120
+ tm.assert_numpy_array_equal(uniques, exp)
121
+
122
+ arr = np.arange(5.0)[::-1]
123
+
124
+ codes, uniques = algos.factorize(arr)
125
+ exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
126
+ tm.assert_numpy_array_equal(codes, exp)
127
+ exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype)
128
+ tm.assert_numpy_array_equal(uniques, exp)
129
+
130
+ codes, uniques = algos.factorize(arr, sort=True)
131
+ exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
132
+ tm.assert_numpy_array_equal(codes, exp)
133
+ exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype)
134
+ tm.assert_numpy_array_equal(uniques, exp)
135
+
136
+ def test_mixed(self):
137
+ # doc example reshaping.rst
138
+ x = Series(["A", "A", np.nan, "B", 3.14, np.inf])
139
+ codes, uniques = algos.factorize(x)
140
+
141
+ exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
142
+ tm.assert_numpy_array_equal(codes, exp)
143
+ exp = Index(["A", "B", 3.14, np.inf])
144
+ tm.assert_index_equal(uniques, exp)
145
+
146
+ codes, uniques = algos.factorize(x, sort=True)
147
+ exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
148
+ tm.assert_numpy_array_equal(codes, exp)
149
+ exp = Index([3.14, np.inf, "A", "B"])
150
+ tm.assert_index_equal(uniques, exp)
151
+
152
+ def test_datelike(self):
153
+ # M8
154
+ v1 = Timestamp("20130101 09:00:00.00004")
155
+ v2 = Timestamp("20130101")
156
+ x = Series([v1, v1, v1, v2, v2, v1])
157
+ codes, uniques = algos.factorize(x)
158
+
159
+ exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
160
+ tm.assert_numpy_array_equal(codes, exp)
161
+ exp = DatetimeIndex([v1, v2])
162
+ tm.assert_index_equal(uniques, exp)
163
+
164
+ codes, uniques = algos.factorize(x, sort=True)
165
+ exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
166
+ tm.assert_numpy_array_equal(codes, exp)
167
+ exp = DatetimeIndex([v2, v1])
168
+ tm.assert_index_equal(uniques, exp)
169
+
170
+ # period
171
+ v1 = Period("201302", freq="M")
172
+ v2 = Period("201303", freq="M")
173
+ x = Series([v1, v1, v1, v2, v2, v1])
174
+
175
+ # periods are not 'sorted' as they are converted back into an index
176
+ codes, uniques = algos.factorize(x)
177
+ exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
178
+ tm.assert_numpy_array_equal(codes, exp)
179
+ tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
180
+
181
+ codes, uniques = algos.factorize(x, sort=True)
182
+ exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
183
+ tm.assert_numpy_array_equal(codes, exp)
184
+ tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
185
+
186
+ # GH 5986
187
+ v1 = to_timedelta("1 day 1 min")
188
+ v2 = to_timedelta("1 day")
189
+ x = Series([v1, v2, v1, v1, v2, v2, v1])
190
+ codes, uniques = algos.factorize(x)
191
+ exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
192
+ tm.assert_numpy_array_equal(codes, exp)
193
+ tm.assert_index_equal(uniques, to_timedelta([v1, v2]))
194
+
195
+ codes, uniques = algos.factorize(x, sort=True)
196
+ exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
197
+ tm.assert_numpy_array_equal(codes, exp)
198
+ tm.assert_index_equal(uniques, to_timedelta([v2, v1]))
199
+
200
+ def test_factorize_nan(self):
201
+ # nan should map to na_sentinel, not reverse_indexer[na_sentinel]
202
+ # rizer.factorize should not raise an exception if na_sentinel indexes
203
+ # outside of reverse_indexer
204
+ key = np.array([1, 2, 1, np.nan], dtype="O")
205
+ rizer = ht.ObjectFactorizer(len(key))
206
+ for na_sentinel in (-1, 20):
207
+ ids = rizer.factorize(key, na_sentinel=na_sentinel)
208
+ expected = np.array([0, 1, 0, na_sentinel], dtype=np.intp)
209
+ assert len(set(key)) == len(set(expected))
210
+ tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
211
+ tm.assert_numpy_array_equal(ids, expected)
212
+
213
+ def test_factorizer_with_mask(self):
214
+ # GH#49549
215
+ data = np.array([1, 2, 3, 1, 1, 0], dtype="int64")
216
+ mask = np.array([False, False, False, False, False, True])
217
+ rizer = ht.Int64Factorizer(len(data))
218
+ result = rizer.factorize(data, mask=mask)
219
+ expected = np.array([0, 1, 2, 0, 0, -1], dtype=np.intp)
220
+ tm.assert_numpy_array_equal(result, expected)
221
+ expected_uniques = np.array([1, 2, 3], dtype="int64")
222
+ tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques)
223
+
224
+ def test_factorizer_object_with_nan(self):
225
+ # GH#49549
226
+ data = np.array([1, 2, 3, 1, np.nan])
227
+ rizer = ht.ObjectFactorizer(len(data))
228
+ result = rizer.factorize(data.astype(object))
229
+ expected = np.array([0, 1, 2, 0, -1], dtype=np.intp)
230
+ tm.assert_numpy_array_equal(result, expected)
231
+ expected_uniques = np.array([1, 2, 3], dtype=object)
232
+ tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques)
233
+
234
+ @pytest.mark.parametrize(
235
+ "data, expected_codes, expected_uniques",
236
+ [
237
+ (
238
+ [(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"],
239
+ [0, 1, 2, 1, 3],
240
+ [(1, 1), (1, 2), (0, 0), "nonsense"],
241
+ ),
242
+ (
243
+ [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
244
+ [0, 1, 2, 1, 3],
245
+ [(1, 1), (1, 2), (0, 0), (1, 2, 3)],
246
+ ),
247
+ ([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]),
248
+ ],
249
+ )
250
+ def test_factorize_tuple_list(self, data, expected_codes, expected_uniques):
251
+ # GH9454
252
+ codes, uniques = pd.factorize(data)
253
+
254
+ tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp))
255
+
256
+ expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object)
257
+ tm.assert_numpy_array_equal(uniques, expected_uniques_array)
258
+
259
+ def test_complex_sorting(self):
260
+ # gh 12666 - check no segfault
261
+ x17 = np.array([complex(i) for i in range(17)], dtype=object)
262
+
263
+ msg = "'[<>]' not supported between instances of .*"
264
+ with pytest.raises(TypeError, match=msg):
265
+ algos.factorize(x17[::-1], sort=True)
266
+
267
+ def test_numeric_dtype_factorize(self, any_real_numpy_dtype):
268
+ # GH41132
269
+ dtype = any_real_numpy_dtype
270
+ data = np.array([1, 2, 2, 1], dtype=dtype)
271
+ expected_codes = np.array([0, 1, 1, 0], dtype=np.intp)
272
+ expected_uniques = np.array([1, 2], dtype=dtype)
273
+
274
+ codes, uniques = algos.factorize(data)
275
+ tm.assert_numpy_array_equal(codes, expected_codes)
276
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
277
+
278
+ def test_float64_factorize(self, writable):
279
+ data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
280
+ data.setflags(write=writable)
281
+ expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)
282
+ expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)
283
+
284
+ codes, uniques = algos.factorize(data)
285
+ tm.assert_numpy_array_equal(codes, expected_codes)
286
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
287
+
288
+ def test_uint64_factorize(self, writable):
289
+ data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)
290
+ data.setflags(write=writable)
291
+ expected_codes = np.array([0, 1, 0], dtype=np.intp)
292
+ expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)
293
+
294
+ codes, uniques = algos.factorize(data)
295
+ tm.assert_numpy_array_equal(codes, expected_codes)
296
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
297
+
298
+ def test_int64_factorize(self, writable):
299
+ data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64)
300
+ data.setflags(write=writable)
301
+ expected_codes = np.array([0, 1, 0], dtype=np.intp)
302
+ expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64)
303
+
304
+ codes, uniques = algos.factorize(data)
305
+ tm.assert_numpy_array_equal(codes, expected_codes)
306
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
307
+
308
+ def test_string_factorize(self, writable):
309
+ data = np.array(["a", "c", "a", "b", "c"], dtype=object)
310
+ data.setflags(write=writable)
311
+ expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp)
312
+ expected_uniques = np.array(["a", "c", "b"], dtype=object)
313
+
314
+ codes, uniques = algos.factorize(data)
315
+ tm.assert_numpy_array_equal(codes, expected_codes)
316
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
317
+
318
+ def test_object_factorize(self, writable):
319
+ data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object)
320
+ data.setflags(write=writable)
321
+ expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)
322
+ expected_uniques = np.array(["a", "c", "b"], dtype=object)
323
+
324
+ codes, uniques = algos.factorize(data)
325
+ tm.assert_numpy_array_equal(codes, expected_codes)
326
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
327
+
328
+ def test_datetime64_factorize(self, writable):
329
+ # GH35650 Verify whether read-only datetime64 array can be factorized
330
+ data = np.array([np.datetime64("2020-01-01T00:00:00.000")], dtype="M8[ns]")
331
+ data.setflags(write=writable)
332
+ expected_codes = np.array([0], dtype=np.intp)
333
+ expected_uniques = np.array(
334
+ ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]"
335
+ )
336
+
337
+ codes, uniques = pd.factorize(data)
338
+ tm.assert_numpy_array_equal(codes, expected_codes)
339
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
340
+
341
+ @pytest.mark.parametrize("sort", [True, False])
342
+ def test_factorize_rangeindex(self, sort):
343
+ # increasing -> sort doesn't matter
344
+ ri = pd.RangeIndex.from_range(range(10))
345
+ expected = np.arange(10, dtype=np.intp), ri
346
+
347
+ result = algos.factorize(ri, sort=sort)
348
+ tm.assert_numpy_array_equal(result[0], expected[0])
349
+ tm.assert_index_equal(result[1], expected[1], exact=True)
350
+
351
+ result = ri.factorize(sort=sort)
352
+ tm.assert_numpy_array_equal(result[0], expected[0])
353
+ tm.assert_index_equal(result[1], expected[1], exact=True)
354
+
355
+ @pytest.mark.parametrize("sort", [True, False])
356
+ def test_factorize_rangeindex_decreasing(self, sort):
357
+ # decreasing -> sort matters
358
+ ri = pd.RangeIndex.from_range(range(10))
359
+ expected = np.arange(10, dtype=np.intp), ri
360
+
361
+ ri2 = ri[::-1]
362
+ expected = expected[0], ri2
363
+ if sort:
364
+ expected = expected[0][::-1], expected[1][::-1]
365
+
366
+ result = algos.factorize(ri2, sort=sort)
367
+ tm.assert_numpy_array_equal(result[0], expected[0])
368
+ tm.assert_index_equal(result[1], expected[1], exact=True)
369
+
370
+ result = ri2.factorize(sort=sort)
371
+ tm.assert_numpy_array_equal(result[0], expected[0])
372
+ tm.assert_index_equal(result[1], expected[1], exact=True)
373
+
374
+ def test_deprecate_order(self):
375
+ # gh 19727 - check warning is raised for deprecated keyword, order.
376
+ # Test not valid once order keyword is removed.
377
+ data = np.array([2**63, 1, 2**63], dtype=np.uint64)
378
+ with pytest.raises(TypeError, match="got an unexpected keyword"):
379
+ algos.factorize(data, order=True)
380
+ with tm.assert_produces_warning(False):
381
+ algos.factorize(data)
382
+
383
+ @pytest.mark.parametrize(
384
+ "data",
385
+ [
386
+ np.array([0, 1, 0], dtype="u8"),
387
+ np.array([-(2**63), 1, -(2**63)], dtype="i8"),
388
+ np.array(["__nan__", "foo", "__nan__"], dtype="object"),
389
+ ],
390
+ )
391
+ def test_parametrized_factorize_na_value_default(self, data):
392
+ # arrays that include the NA default for that type, but isn't used.
393
+ codes, uniques = algos.factorize(data)
394
+ expected_uniques = data[[0, 1]]
395
+ expected_codes = np.array([0, 1, 0], dtype=np.intp)
396
+ tm.assert_numpy_array_equal(codes, expected_codes)
397
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
398
+
399
+ @pytest.mark.parametrize(
400
+ "data, na_value",
401
+ [
402
+ (np.array([0, 1, 0, 2], dtype="u8"), 0),
403
+ (np.array([1, 0, 1, 2], dtype="u8"), 1),
404
+ (np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)),
405
+ (np.array([1, -(2**63), 1, 0], dtype="i8"), 1),
406
+ (np.array(["a", "", "a", "b"], dtype=object), "a"),
407
+ (np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),
408
+ (np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),
409
+ ],
410
+ )
411
+ def test_parametrized_factorize_na_value(self, data, na_value):
412
+ codes, uniques = algos.factorize_array(data, na_value=na_value)
413
+ expected_uniques = data[[1, 3]]
414
+ expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)
415
+ tm.assert_numpy_array_equal(codes, expected_codes)
416
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
417
+
418
+ @pytest.mark.parametrize("sort", [True, False])
419
+ @pytest.mark.parametrize(
420
+ "data, uniques",
421
+ [
422
+ (
423
+ np.array(["b", "a", None, "b"], dtype=object),
424
+ np.array(["b", "a"], dtype=object),
425
+ ),
426
+ (
427
+ pd.array([2, 1, np.nan, 2], dtype="Int64"),
428
+ pd.array([2, 1], dtype="Int64"),
429
+ ),
430
+ ],
431
+ ids=["numpy_array", "extension_array"],
432
+ )
433
+ def test_factorize_use_na_sentinel(self, sort, data, uniques):
434
+ codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True)
435
+ if sort:
436
+ expected_codes = np.array([1, 0, -1, 1], dtype=np.intp)
437
+ expected_uniques = algos.safe_sort(uniques)
438
+ else:
439
+ expected_codes = np.array([0, 1, -1, 0], dtype=np.intp)
440
+ expected_uniques = uniques
441
+ tm.assert_numpy_array_equal(codes, expected_codes)
442
+ if isinstance(data, np.ndarray):
443
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
444
+ else:
445
+ tm.assert_extension_array_equal(uniques, expected_uniques)
446
+
447
+ @pytest.mark.parametrize(
448
+ "data, expected_codes, expected_uniques",
449
+ [
450
+ (
451
+ ["a", None, "b", "a"],
452
+ np.array([0, 1, 2, 0], dtype=np.dtype("intp")),
453
+ np.array(["a", np.nan, "b"], dtype=object),
454
+ ),
455
+ (
456
+ ["a", np.nan, "b", "a"],
457
+ np.array([0, 1, 2, 0], dtype=np.dtype("intp")),
458
+ np.array(["a", np.nan, "b"], dtype=object),
459
+ ),
460
+ ],
461
+ )
462
+ def test_object_factorize_use_na_sentinel_false(
463
+ self, data, expected_codes, expected_uniques
464
+ ):
465
+ codes, uniques = algos.factorize(data, use_na_sentinel=False)
466
+
467
+ tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True)
468
+ tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True)
469
+
470
+ @pytest.mark.parametrize(
471
+ "data, expected_codes, expected_uniques",
472
+ [
473
+ (
474
+ [1, None, 1, 2],
475
+ np.array([0, 1, 0, 2], dtype=np.dtype("intp")),
476
+ np.array([1, np.nan, 2], dtype="O"),
477
+ ),
478
+ (
479
+ [1, np.nan, 1, 2],
480
+ np.array([0, 1, 0, 2], dtype=np.dtype("intp")),
481
+ np.array([1, np.nan, 2], dtype=np.float64),
482
+ ),
483
+ ],
484
+ )
485
+ def test_int_factorize_use_na_sentinel_false(
486
+ self, data, expected_codes, expected_uniques
487
+ ):
488
+ codes, uniques = algos.factorize(data, use_na_sentinel=False)
489
+
490
+ tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True)
491
+ tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True)
492
+
493
+ @pytest.mark.parametrize(
494
+ "data, expected_codes, expected_uniques",
495
+ [
496
+ (
497
+ Index(Categorical(["a", "a", "b"])),
498
+ np.array([0, 0, 1], dtype=np.intp),
499
+ CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"),
500
+ ),
501
+ (
502
+ Series(Categorical(["a", "a", "b"])),
503
+ np.array([0, 0, 1], dtype=np.intp),
504
+ CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"),
505
+ ),
506
+ (
507
+ Series(DatetimeIndex(["2017", "2017"], tz="US/Eastern")),
508
+ np.array([0, 0], dtype=np.intp),
509
+ DatetimeIndex(["2017"], tz="US/Eastern"),
510
+ ),
511
+ ],
512
+ )
513
+ def test_factorize_mixed_values(self, data, expected_codes, expected_uniques):
514
+ # GH 19721
515
+ codes, uniques = algos.factorize(data)
516
+ tm.assert_numpy_array_equal(codes, expected_codes)
517
+ tm.assert_index_equal(uniques, expected_uniques)
518
+
519
+
520
+ class TestUnique:
521
+ def test_ints(self):
522
+ arr = np.random.randint(0, 100, size=50)
523
+
524
+ result = algos.unique(arr)
525
+ assert isinstance(result, np.ndarray)
526
+
527
+ def test_objects(self):
528
+ arr = np.random.randint(0, 100, size=50).astype("O")
529
+
530
+ result = algos.unique(arr)
531
+ assert isinstance(result, np.ndarray)
532
+
533
+ def test_object_refcount_bug(self):
534
+ lst = ["A", "B", "C", "D", "E"]
535
+ for i in range(1000):
536
+ len(algos.unique(lst))
537
+
538
+ def test_on_index_object(self):
539
+ mindex = MultiIndex.from_arrays(
540
+ [np.arange(5).repeat(5), np.tile(np.arange(5), 5)]
541
+ )
542
+ expected = mindex.values
543
+ expected.sort()
544
+
545
+ mindex = mindex.repeat(2)
546
+
547
+ result = pd.unique(mindex)
548
+ result.sort()
549
+
550
+ tm.assert_almost_equal(result, expected)
551
+
552
+ def test_dtype_preservation(self, any_numpy_dtype):
553
+ # GH 15442
554
+ if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES):
555
+ data = [1, 2, 2]
556
+ uniques = [1, 2]
557
+ elif is_integer_dtype(any_numpy_dtype):
558
+ data = [1, 2, 2]
559
+ uniques = [1, 2]
560
+ elif is_float_dtype(any_numpy_dtype):
561
+ data = [1, 2, 2]
562
+ uniques = [1.0, 2.0]
563
+ elif is_complex_dtype(any_numpy_dtype):
564
+ data = [complex(1, 0), complex(2, 0), complex(2, 0)]
565
+ uniques = [complex(1, 0), complex(2, 0)]
566
+ elif is_bool_dtype(any_numpy_dtype):
567
+ data = [True, True, False]
568
+ uniques = [True, False]
569
+ elif is_object_dtype(any_numpy_dtype):
570
+ data = ["A", "B", "B"]
571
+ uniques = ["A", "B"]
572
+ else:
573
+ # datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere
574
+ data = [1, 2, 2]
575
+ uniques = [1, 2]
576
+
577
+ result = Series(data, dtype=any_numpy_dtype).unique()
578
+ expected = np.array(uniques, dtype=any_numpy_dtype)
579
+
580
+ if any_numpy_dtype in tm.STRING_DTYPES:
581
+ expected = expected.astype(object)
582
+
583
+ if expected.dtype.kind in ["m", "M"]:
584
+ # We get TimedeltaArray/DatetimeArray
585
+ assert isinstance(result, (DatetimeArray, TimedeltaArray))
586
+ result = np.array(result)
587
+ tm.assert_numpy_array_equal(result, expected)
588
+
589
+ def test_datetime64_dtype_array_returned(self):
590
+ # GH 9431
591
+ expected = np.array(
592
+ [
593
+ "2015-01-03T00:00:00.000000000",
594
+ "2015-01-01T00:00:00.000000000",
595
+ ],
596
+ dtype="M8[ns]",
597
+ )
598
+
599
+ dt_index = to_datetime(
600
+ [
601
+ "2015-01-03T00:00:00.000000000",
602
+ "2015-01-01T00:00:00.000000000",
603
+ "2015-01-01T00:00:00.000000000",
604
+ ]
605
+ )
606
+ result = algos.unique(dt_index)
607
+ tm.assert_numpy_array_equal(result, expected)
608
+ assert result.dtype == expected.dtype
609
+
610
+ s = Series(dt_index)
611
+ result = algos.unique(s)
612
+ tm.assert_numpy_array_equal(result, expected)
613
+ assert result.dtype == expected.dtype
614
+
615
+ arr = s.values
616
+ result = algos.unique(arr)
617
+ tm.assert_numpy_array_equal(result, expected)
618
+ assert result.dtype == expected.dtype
619
+
620
+ def test_datetime_non_ns(self):
621
+ a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]")
622
+ result = pd.unique(a)
623
+ expected = np.array(["2000", "2001"], dtype="datetime64[s]")
624
+ tm.assert_numpy_array_equal(result, expected)
625
+
626
+ def test_timedelta_non_ns(self):
627
+ a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]")
628
+ result = pd.unique(a)
629
+ expected = np.array([2000, 2001], dtype="timedelta64[s]")
630
+ tm.assert_numpy_array_equal(result, expected)
631
+
632
+ def test_timedelta64_dtype_array_returned(self):
633
+ # GH 9431
634
+ expected = np.array([31200, 45678, 10000], dtype="m8[ns]")
635
+
636
+ td_index = to_timedelta([31200, 45678, 31200, 10000, 45678])
637
+ result = algos.unique(td_index)
638
+ tm.assert_numpy_array_equal(result, expected)
639
+ assert result.dtype == expected.dtype
640
+
641
+ s = Series(td_index)
642
+ result = algos.unique(s)
643
+ tm.assert_numpy_array_equal(result, expected)
644
+ assert result.dtype == expected.dtype
645
+
646
+ arr = s.values
647
+ result = algos.unique(arr)
648
+ tm.assert_numpy_array_equal(result, expected)
649
+ assert result.dtype == expected.dtype
650
+
651
+ def test_uint64_overflow(self):
652
+ s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
653
+ exp = np.array([1, 2, 2**63], dtype=np.uint64)
654
+ tm.assert_numpy_array_equal(algos.unique(s), exp)
655
+
656
+ def test_nan_in_object_array(self):
657
+ duplicated_items = ["a", np.nan, "c", "c"]
658
+ result = pd.unique(duplicated_items)
659
+ expected = np.array(["a", np.nan, "c"], dtype=object)
660
+ tm.assert_numpy_array_equal(result, expected)
661
+
662
+ def test_categorical(self):
663
+ # we are expecting to return in the order
664
+ # of appearance
665
+ expected = Categorical(list("bac"))
666
+
667
+ # we are expecting to return in the order
668
+ # of the categories
669
+ expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True)
670
+
671
+ # GH 15939
672
+ c = Categorical(list("baabc"))
673
+ result = c.unique()
674
+ tm.assert_categorical_equal(result, expected)
675
+
676
+ result = algos.unique(c)
677
+ tm.assert_categorical_equal(result, expected)
678
+
679
+ c = Categorical(list("baabc"), ordered=True)
680
+ result = c.unique()
681
+ tm.assert_categorical_equal(result, expected_o)
682
+
683
+ result = algos.unique(c)
684
+ tm.assert_categorical_equal(result, expected_o)
685
+
686
+ # Series of categorical dtype
687
+ s = Series(Categorical(list("baabc")), name="foo")
688
+ result = s.unique()
689
+ tm.assert_categorical_equal(result, expected)
690
+
691
+ result = pd.unique(s)
692
+ tm.assert_categorical_equal(result, expected)
693
+
694
+ # CI -> return CI
695
+ ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc")))
696
+ expected = CategoricalIndex(expected)
697
+ result = ci.unique()
698
+ tm.assert_index_equal(result, expected)
699
+
700
+ result = pd.unique(ci)
701
+ tm.assert_index_equal(result, expected)
702
+
703
+ def test_datetime64tz_aware(self):
704
+ # GH 15939
705
+
706
+ result = Series(
707
+ Index(
708
+ [
709
+ Timestamp("20160101", tz="US/Eastern"),
710
+ Timestamp("20160101", tz="US/Eastern"),
711
+ ]
712
+ )
713
+ ).unique()
714
+ expected = DatetimeArray._from_sequence(
715
+ np.array([Timestamp("2016-01-01 00:00:00-0500", tz="US/Eastern")])
716
+ )
717
+ tm.assert_extension_array_equal(result, expected)
718
+
719
+ result = Index(
720
+ [
721
+ Timestamp("20160101", tz="US/Eastern"),
722
+ Timestamp("20160101", tz="US/Eastern"),
723
+ ]
724
+ ).unique()
725
+ expected = DatetimeIndex(
726
+ ["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
727
+ )
728
+ tm.assert_index_equal(result, expected)
729
+
730
+ result = pd.unique(
731
+ Series(
732
+ Index(
733
+ [
734
+ Timestamp("20160101", tz="US/Eastern"),
735
+ Timestamp("20160101", tz="US/Eastern"),
736
+ ]
737
+ )
738
+ )
739
+ )
740
+ expected = DatetimeArray._from_sequence(
741
+ np.array([Timestamp("2016-01-01", tz="US/Eastern")])
742
+ )
743
+ tm.assert_extension_array_equal(result, expected)
744
+
745
+ result = pd.unique(
746
+ Index(
747
+ [
748
+ Timestamp("20160101", tz="US/Eastern"),
749
+ Timestamp("20160101", tz="US/Eastern"),
750
+ ]
751
+ )
752
+ )
753
+ expected = DatetimeIndex(
754
+ ["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
755
+ )
756
+ tm.assert_index_equal(result, expected)
757
+
758
+ def test_order_of_appearance(self):
759
+ # 9346
760
+ # light testing of guarantee of order of appearance
761
+ # these also are the doc-examples
762
+ result = pd.unique(Series([2, 1, 3, 3]))
763
+ tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64"))
764
+
765
+ result = pd.unique(Series([2] + [1] * 5))
766
+ tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64"))
767
+
768
+ result = pd.unique(Series([Timestamp("20160101"), Timestamp("20160101")]))
769
+ expected = np.array(["2016-01-01T00:00:00.000000000"], dtype="datetime64[ns]")
770
+ tm.assert_numpy_array_equal(result, expected)
771
+
772
+ result = pd.unique(
773
+ Index(
774
+ [
775
+ Timestamp("20160101", tz="US/Eastern"),
776
+ Timestamp("20160101", tz="US/Eastern"),
777
+ ]
778
+ )
779
+ )
780
+ expected = DatetimeIndex(
781
+ ["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
782
+ )
783
+ tm.assert_index_equal(result, expected)
784
+
785
+ result = pd.unique(list("aabc"))
786
+ expected = np.array(["a", "b", "c"], dtype=object)
787
+ tm.assert_numpy_array_equal(result, expected)
788
+
789
+ result = pd.unique(Series(Categorical(list("aabc"))))
790
+ expected = Categorical(list("abc"))
791
+ tm.assert_categorical_equal(result, expected)
792
+
793
+ @pytest.mark.parametrize(
794
+ "arg ,expected",
795
+ [
796
+ (("1", "1", "2"), np.array(["1", "2"], dtype=object)),
797
+ (("foo",), np.array(["foo"], dtype=object)),
798
+ ],
799
+ )
800
+ def test_tuple_with_strings(self, arg, expected):
801
+ # see GH 17108
802
+ result = pd.unique(arg)
803
+ tm.assert_numpy_array_equal(result, expected)
804
+
805
+ def test_obj_none_preservation(self):
806
+ # GH 20866
807
+ arr = np.array(["foo", None], dtype=object)
808
+ result = pd.unique(arr)
809
+ expected = np.array(["foo", None], dtype=object)
810
+
811
+ tm.assert_numpy_array_equal(result, expected, strict_nan=True)
812
+
813
+ def test_signed_zero(self):
814
+ # GH 21866
815
+ a = np.array([-0.0, 0.0])
816
+ result = pd.unique(a)
817
+ expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent
818
+ tm.assert_numpy_array_equal(result, expected)
819
+
820
+ def test_different_nans(self):
821
+ # GH 21866
822
+ # create different nans from bit-patterns:
823
+ NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
824
+ NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
825
+ assert NAN1 != NAN1
826
+ assert NAN2 != NAN2
827
+ a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent
828
+ result = pd.unique(a)
829
+ expected = np.array([np.nan])
830
+ tm.assert_numpy_array_equal(result, expected)
831
+
832
+ @pytest.mark.parametrize("el_type", [np.float64, object])
833
+ def test_first_nan_kept(self, el_type):
834
+ # GH 22295
835
+ # create different nans from bit-patterns:
836
+ bits_for_nan1 = 0xFFF8000000000001
837
+ bits_for_nan2 = 0x7FF8000000000001
838
+ NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0]
839
+ NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0]
840
+ assert NAN1 != NAN1
841
+ assert NAN2 != NAN2
842
+ a = np.array([NAN1, NAN2], dtype=el_type)
843
+ result = pd.unique(a)
844
+ assert result.size == 1
845
+ # use bit patterns to identify which nan was kept:
846
+ result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0]
847
+ assert result_nan_bits == bits_for_nan1
848
+
849
+ def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2):
850
+ # GH 22295
851
+ if unique_nulls_fixture is unique_nulls_fixture2:
852
+ return # skip it, values not unique
853
+ a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)
854
+ result = pd.unique(a)
855
+ assert result.size == 2
856
+ assert a[0] is unique_nulls_fixture
857
+ assert a[1] is unique_nulls_fixture2
858
+
859
+ def test_unique_masked(self, any_numeric_ea_dtype):
860
+ # GH#48019
861
+ ser = Series([1, pd.NA, 2] * 3, dtype=any_numeric_ea_dtype)
862
+ result = pd.unique(ser)
863
+ expected = pd.array([1, pd.NA, 2], dtype=any_numeric_ea_dtype)
864
+ tm.assert_extension_array_equal(result, expected)
865
+
866
+
867
+ def test_nunique_ints(index_or_series_or_array):
868
+ # GH#36327
869
+ values = index_or_series_or_array(np.random.randint(0, 20, 30))
870
+ result = algos.nunique_ints(values)
871
+ expected = len(algos.unique(values))
872
+ assert result == expected
873
+
874
+
875
+ class TestIsin:
876
+ def test_invalid(self):
877
+ msg = (
878
+ r"only list-like objects are allowed to be passed to isin\(\), "
879
+ r"you passed a \[int\]"
880
+ )
881
+ with pytest.raises(TypeError, match=msg):
882
+ algos.isin(1, 1)
883
+ with pytest.raises(TypeError, match=msg):
884
+ algos.isin(1, [1])
885
+ with pytest.raises(TypeError, match=msg):
886
+ algos.isin([1], 1)
887
+
888
+ def test_basic(self):
889
+ result = algos.isin([1, 2], [1])
890
+ expected = np.array([True, False])
891
+ tm.assert_numpy_array_equal(result, expected)
892
+
893
+ result = algos.isin(np.array([1, 2]), [1])
894
+ expected = np.array([True, False])
895
+ tm.assert_numpy_array_equal(result, expected)
896
+
897
+ result = algos.isin(Series([1, 2]), [1])
898
+ expected = np.array([True, False])
899
+ tm.assert_numpy_array_equal(result, expected)
900
+
901
+ result = algos.isin(Series([1, 2]), Series([1]))
902
+ expected = np.array([True, False])
903
+ tm.assert_numpy_array_equal(result, expected)
904
+
905
+ result = algos.isin(Series([1, 2]), {1})
906
+ expected = np.array([True, False])
907
+ tm.assert_numpy_array_equal(result, expected)
908
+
909
+ result = algos.isin(["a", "b"], ["a"])
910
+ expected = np.array([True, False])
911
+ tm.assert_numpy_array_equal(result, expected)
912
+
913
+ result = algos.isin(Series(["a", "b"]), Series(["a"]))
914
+ expected = np.array([True, False])
915
+ tm.assert_numpy_array_equal(result, expected)
916
+
917
+ result = algos.isin(Series(["a", "b"]), {"a"})
918
+ expected = np.array([True, False])
919
+ tm.assert_numpy_array_equal(result, expected)
920
+
921
+ result = algos.isin(["a", "b"], [1])
922
+ expected = np.array([False, False])
923
+ tm.assert_numpy_array_equal(result, expected)
924
+
925
+ def test_i8(self):
926
+ arr = date_range("20130101", periods=3).values
927
+ result = algos.isin(arr, [arr[0]])
928
+ expected = np.array([True, False, False])
929
+ tm.assert_numpy_array_equal(result, expected)
930
+
931
+ result = algos.isin(arr, arr[0:2])
932
+ expected = np.array([True, True, False])
933
+ tm.assert_numpy_array_equal(result, expected)
934
+
935
+ result = algos.isin(arr, set(arr[0:2]))
936
+ expected = np.array([True, True, False])
937
+ tm.assert_numpy_array_equal(result, expected)
938
+
939
+ arr = timedelta_range("1 day", periods=3).values
940
+ result = algos.isin(arr, [arr[0]])
941
+ expected = np.array([True, False, False])
942
+ tm.assert_numpy_array_equal(result, expected)
943
+
944
+ result = algos.isin(arr, arr[0:2])
945
+ expected = np.array([True, True, False])
946
+ tm.assert_numpy_array_equal(result, expected)
947
+
948
+ result = algos.isin(arr, set(arr[0:2]))
949
+ expected = np.array([True, True, False])
950
+ tm.assert_numpy_array_equal(result, expected)
951
+
952
+ @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
953
+ @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])
954
+ def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):
955
+ # Anything but object and we get all-False shortcut
956
+
957
+ dta = date_range("2013-01-01", periods=3)._values
958
+ if dtype1 == "period[D]":
959
+ # TODO: fix Series.view to get this on its own
960
+ arr = dta.to_period("D")
961
+ elif dtype1 == "M8[ns, UTC]":
962
+ # TODO: fix Series.view to get this on its own
963
+ arr = dta.tz_localize("UTC")
964
+ else:
965
+ arr = Series(dta.view("i8")).view(dtype1)._values
966
+
967
+ comps = arr.view("i8").astype(dtype)
968
+
969
+ result = algos.isin(comps, arr)
970
+ expected = np.zeros(comps.shape, dtype=bool)
971
+ tm.assert_numpy_array_equal(result, expected)
972
+
973
+ def test_large(self):
974
+ s = date_range("20000101", periods=2000000, freq="s").values
975
+ result = algos.isin(s, s[0:2])
976
+ expected = np.zeros(len(s), dtype=bool)
977
+ expected[0] = True
978
+ expected[1] = True
979
+ tm.assert_numpy_array_equal(result, expected)
980
+
981
+ def test_categorical_from_codes(self):
982
+ # GH 16639
983
+ vals = np.array([0, 1, 2, 0])
984
+ cats = ["a", "b", "c"]
985
+ Sd = Series(Categorical([1]).from_codes(vals, cats))
986
+ St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats))
987
+ expected = np.array([True, True, False, True])
988
+ result = algos.isin(Sd, St)
989
+ tm.assert_numpy_array_equal(expected, result)
990
+
991
+ def test_categorical_isin(self):
992
+ vals = np.array([0, 1, 2, 0])
993
+ cats = ["a", "b", "c"]
994
+ cat = Categorical([1]).from_codes(vals, cats)
995
+ other = Categorical([1]).from_codes(np.array([0, 1]), cats)
996
+
997
+ expected = np.array([True, True, False, True])
998
+ result = algos.isin(cat, other)
999
+ tm.assert_numpy_array_equal(expected, result)
1000
+
1001
+ def test_same_nan_is_in(self):
1002
+ # GH 22160
1003
+ # nan is special, because from " a is b" doesn't follow "a == b"
1004
+ # at least, isin() should follow python's "np.nan in [nan] == True"
1005
+ # casting to -> np.float64 -> another float-object somewhere on
1006
+ # the way could lead jepardize this behavior
1007
+ comps = [np.nan] # could be casted to float64
1008
+ values = [np.nan]
1009
+ expected = np.array([True])
1010
+ result = algos.isin(comps, values)
1011
+ tm.assert_numpy_array_equal(expected, result)
1012
+
1013
+ def test_same_nan_is_in_large(self):
1014
+ # https://github.com/pandas-dev/pandas/issues/22205
1015
+ s = np.tile(1.0, 1_000_001)
1016
+ s[0] = np.nan
1017
+ result = algos.isin(s, [np.nan, 1])
1018
+ expected = np.ones(len(s), dtype=bool)
1019
+ tm.assert_numpy_array_equal(result, expected)
1020
+
1021
+ def test_same_nan_is_in_large_series(self):
1022
+ # https://github.com/pandas-dev/pandas/issues/22205
1023
+ s = np.tile(1.0, 1_000_001)
1024
+ series = Series(s)
1025
+ s[0] = np.nan
1026
+ result = series.isin([np.nan, 1])
1027
+ expected = Series(np.ones(len(s), dtype=bool))
1028
+ tm.assert_series_equal(result, expected)
1029
+
1030
+ def test_same_object_is_in(self):
1031
+ # GH 22160
1032
+ # there could be special treatment for nans
1033
+ # the user however could define a custom class
1034
+ # with similar behavior, then we at least should
1035
+ # fall back to usual python's behavior: "a in [a] == True"
1036
+ class LikeNan:
1037
+ def __eq__(self, other) -> bool:
1038
+ return False
1039
+
1040
+ def __hash__(self):
1041
+ return 0
1042
+
1043
+ a, b = LikeNan(), LikeNan()
1044
+ # same object -> True
1045
+ tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
1046
+ # different objects -> False
1047
+ tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))
1048
+
1049
+ def test_different_nans(self):
1050
+ # GH 22160
1051
+ # all nans are handled as equivalent
1052
+
1053
+ comps = [float("nan")]
1054
+ values = [float("nan")]
1055
+ assert comps[0] is not values[0] # different nan-objects
1056
+
1057
+ # as list of python-objects:
1058
+ result = algos.isin(comps, values)
1059
+ tm.assert_numpy_array_equal(np.array([True]), result)
1060
+
1061
+ # as object-array:
1062
+ result = algos.isin(
1063
+ np.asarray(comps, dtype=object), np.asarray(values, dtype=object)
1064
+ )
1065
+ tm.assert_numpy_array_equal(np.array([True]), result)
1066
+
1067
+ # as float64-array:
1068
+ result = algos.isin(
1069
+ np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)
1070
+ )
1071
+ tm.assert_numpy_array_equal(np.array([True]), result)
1072
+
1073
+ def test_no_cast(self):
1074
+ # GH 22160
1075
+ # ensure 42 is not casted to a string
1076
+ comps = ["ss", 42]
1077
+ values = ["42"]
1078
+ expected = np.array([False, False])
1079
+ result = algos.isin(comps, values)
1080
+ tm.assert_numpy_array_equal(expected, result)
1081
+
1082
+ @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
1083
+ def test_empty(self, empty):
1084
+ # see gh-16991
1085
+ vals = Index(["a", "b"])
1086
+ expected = np.array([False, False])
1087
+
1088
+ result = algos.isin(vals, empty)
1089
+ tm.assert_numpy_array_equal(expected, result)
1090
+
1091
+ def test_different_nan_objects(self):
1092
+ # GH 22119
1093
+ comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object)
1094
+ vals = np.array([float("nan")], dtype=object)
1095
+ expected = np.array([False, False, True])
1096
+ result = algos.isin(comps, vals)
1097
+ tm.assert_numpy_array_equal(expected, result)
1098
+
1099
+ def test_different_nans_as_float64(self):
1100
+ # GH 21866
1101
+ # create different nans from bit-patterns,
1102
+ # these nans will land in different buckets in the hash-table
1103
+ # if no special care is taken
1104
+ NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
1105
+ NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
1106
+ assert NAN1 != NAN1
1107
+ assert NAN2 != NAN2
1108
+
1109
+ # check that NAN1 and NAN2 are equivalent:
1110
+ arr = np.array([NAN1, NAN2], dtype=np.float64)
1111
+ lookup1 = np.array([NAN1], dtype=np.float64)
1112
+ result = algos.isin(arr, lookup1)
1113
+ expected = np.array([True, True])
1114
+ tm.assert_numpy_array_equal(result, expected)
1115
+
1116
+ lookup2 = np.array([NAN2], dtype=np.float64)
1117
+ result = algos.isin(arr, lookup2)
1118
+ expected = np.array([True, True])
1119
+ tm.assert_numpy_array_equal(result, expected)
1120
+
1121
+ def test_isin_int_df_string_search(self):
1122
+ """Comparing df with int`s (1,2) with a string at isin() ("1")
1123
+ -> should not match values because int 1 is not equal str 1"""
1124
+ df = DataFrame({"values": [1, 2]})
1125
+ result = df.isin(["1"])
1126
+ expected_false = DataFrame({"values": [False, False]})
1127
+ tm.assert_frame_equal(result, expected_false)
1128
+
1129
+ def test_isin_nan_df_string_search(self):
1130
+ """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN")
1131
+ -> should not match values because np.nan is not equal str NaN"""
1132
+ df = DataFrame({"values": [np.nan, 2]})
1133
+ result = df.isin(["NaN"])
1134
+ expected_false = DataFrame({"values": [False, False]})
1135
+ tm.assert_frame_equal(result, expected_false)
1136
+
1137
+ def test_isin_float_df_string_search(self):
1138
+ """Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245")
1139
+ -> should not match values because float 1.4245 is not equal str 1.4245"""
1140
+ df = DataFrame({"values": [1.4245, 2.32441]})
1141
+ result = df.isin(["1.4245"])
1142
+ expected_false = DataFrame({"values": [False, False]})
1143
+ tm.assert_frame_equal(result, expected_false)
1144
+
1145
+ def test_isin_unsigned_dtype(self):
1146
+ # GH#46485
1147
+ ser = Series([1378774140726870442], dtype=np.uint64)
1148
+ result = ser.isin([1378774140726870528])
1149
+ expected = Series(False)
1150
+ tm.assert_series_equal(result, expected)
1151
+
1152
+
1153
+ class TestValueCounts:
1154
+ def test_value_counts(self):
1155
+ np.random.seed(1234)
1156
+ from pandas.core.reshape.tile import cut
1157
+
1158
+ arr = np.random.randn(4)
1159
+ factor = cut(arr, 4)
1160
+
1161
+ # assert isinstance(factor, n)
1162
+ result = algos.value_counts(factor)
1163
+ breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
1164
+ index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
1165
+ expected = Series([1, 1, 1, 1], index=index, name="count")
1166
+ tm.assert_series_equal(result.sort_index(), expected.sort_index())
1167
+
1168
+ def test_value_counts_bins(self):
1169
+ s = [1, 2, 3, 4]
1170
+ result = algos.value_counts(s, bins=1)
1171
+ expected = Series(
1172
+ [4], index=IntervalIndex.from_tuples([(0.996, 4.0)]), name="count"
1173
+ )
1174
+ tm.assert_series_equal(result, expected)
1175
+
1176
+ result = algos.value_counts(s, bins=2, sort=False)
1177
+ expected = Series(
1178
+ [2, 2],
1179
+ index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]),
1180
+ name="count",
1181
+ )
1182
+ tm.assert_series_equal(result, expected)
1183
+
1184
+ def test_value_counts_dtypes(self):
1185
+ result = algos.value_counts([1, 1.0])
1186
+ assert len(result) == 1
1187
+
1188
+ result = algos.value_counts([1, 1.0], bins=1)
1189
+ assert len(result) == 1
1190
+
1191
+ result = algos.value_counts(Series([1, 1.0, "1"])) # object
1192
+ assert len(result) == 2
1193
+
1194
+ msg = "bins argument only works with numeric data"
1195
+ with pytest.raises(TypeError, match=msg):
1196
+ algos.value_counts(["1", 1], bins=1)
1197
+
1198
+ def test_value_counts_nat(self):
1199
+ td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]")
1200
+ dt = to_datetime(["NaT", "2014-01-01"])
1201
+
1202
+ for s in [td, dt]:
1203
+ vc = algos.value_counts(s)
1204
+ vc_with_na = algos.value_counts(s, dropna=False)
1205
+ assert len(vc) == 1
1206
+ assert len(vc_with_na) == 2
1207
+
1208
+ exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1}, name="count")
1209
+ tm.assert_series_equal(algos.value_counts(dt), exp_dt)
1210
+ # TODO same for (timedelta)
1211
+
1212
+ def test_value_counts_datetime_outofbounds(self):
1213
+ # GH 13663
1214
+ s = Series(
1215
+ [
1216
+ datetime(3000, 1, 1),
1217
+ datetime(5000, 1, 1),
1218
+ datetime(5000, 1, 1),
1219
+ datetime(6000, 1, 1),
1220
+ datetime(3000, 1, 1),
1221
+ datetime(3000, 1, 1),
1222
+ ]
1223
+ )
1224
+ res = s.value_counts()
1225
+
1226
+ exp_index = Index(
1227
+ [datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)],
1228
+ dtype=object,
1229
+ )
1230
+ exp = Series([3, 2, 1], index=exp_index, name="count")
1231
+ tm.assert_series_equal(res, exp)
1232
+
1233
+ # GH 12424
1234
+ res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore")
1235
+ exp = Series(["2362-01-01", np.nan], dtype=object)
1236
+ tm.assert_series_equal(res, exp)
1237
+
1238
+ def test_categorical(self):
1239
+ s = Series(Categorical(list("aaabbc")))
1240
+ result = s.value_counts()
1241
+ expected = Series(
1242
+ [3, 2, 1], index=CategoricalIndex(["a", "b", "c"]), name="count"
1243
+ )
1244
+
1245
+ tm.assert_series_equal(result, expected, check_index_type=True)
1246
+
1247
+ # preserve order?
1248
+ s = s.cat.as_ordered()
1249
+ result = s.value_counts()
1250
+ expected.index = expected.index.as_ordered()
1251
+ tm.assert_series_equal(result, expected, check_index_type=True)
1252
+
1253
+ def test_categorical_nans(self):
1254
+ s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan)
1255
+ s.iloc[1] = np.nan
1256
+ result = s.value_counts()
1257
+ expected = Series(
1258
+ [4, 3, 2],
1259
+ index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]),
1260
+ name="count",
1261
+ )
1262
+ tm.assert_series_equal(result, expected, check_index_type=True)
1263
+ result = s.value_counts(dropna=False)
1264
+ expected = Series(
1265
+ [4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]), name="count"
1266
+ )
1267
+ tm.assert_series_equal(result, expected, check_index_type=True)
1268
+
1269
+ # out of order
1270
+ s = Series(
1271
+ Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"])
1272
+ )
1273
+ s.iloc[1] = np.nan
1274
+ result = s.value_counts()
1275
+ expected = Series(
1276
+ [4, 3, 2],
1277
+ index=CategoricalIndex(
1278
+ ["a", "b", "c"],
1279
+ categories=["b", "a", "c"],
1280
+ ordered=True,
1281
+ ),
1282
+ name="count",
1283
+ )
1284
+ tm.assert_series_equal(result, expected, check_index_type=True)
1285
+
1286
+ result = s.value_counts(dropna=False)
1287
+ expected = Series(
1288
+ [4, 3, 2, 1],
1289
+ index=CategoricalIndex(
1290
+ ["a", "b", "c", np.nan], categories=["b", "a", "c"], ordered=True
1291
+ ),
1292
+ name="count",
1293
+ )
1294
+ tm.assert_series_equal(result, expected, check_index_type=True)
1295
+
1296
+ def test_categorical_zeroes(self):
1297
+ # keep the `d` category with 0
1298
+ s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True))
1299
+ result = s.value_counts()
1300
+ expected = Series(
1301
+ [3, 2, 1, 0],
1302
+ index=Categorical(
1303
+ ["b", "a", "c", "d"], categories=list("abcd"), ordered=True
1304
+ ),
1305
+ name="count",
1306
+ )
1307
+ tm.assert_series_equal(result, expected, check_index_type=True)
1308
+
1309
+ def test_dropna(self):
1310
+ # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
1311
+
1312
+ tm.assert_series_equal(
1313
+ Series([True, True, False]).value_counts(dropna=True),
1314
+ Series([2, 1], index=[True, False], name="count"),
1315
+ )
1316
+ tm.assert_series_equal(
1317
+ Series([True, True, False]).value_counts(dropna=False),
1318
+ Series([2, 1], index=[True, False], name="count"),
1319
+ )
1320
+
1321
+ tm.assert_series_equal(
1322
+ Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True),
1323
+ Series([3, 2], index=Index([True, False], dtype=object), name="count"),
1324
+ )
1325
+ tm.assert_series_equal(
1326
+ Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False),
1327
+ Series([5, 3, 2], index=[True, False, np.nan], name="count"),
1328
+ )
1329
+ tm.assert_series_equal(
1330
+ Series([10.3, 5.0, 5.0]).value_counts(dropna=True),
1331
+ Series([2, 1], index=[5.0, 10.3], name="count"),
1332
+ )
1333
+ tm.assert_series_equal(
1334
+ Series([10.3, 5.0, 5.0]).value_counts(dropna=False),
1335
+ Series([2, 1], index=[5.0, 10.3], name="count"),
1336
+ )
1337
+
1338
+ tm.assert_series_equal(
1339
+ Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True),
1340
+ Series([2, 1], index=[5.0, 10.3], name="count"),
1341
+ )
1342
+
1343
+ result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False)
1344
+ expected = Series([3, 2, 1], index=[5.0, 10.3, np.nan], name="count")
1345
+ tm.assert_series_equal(result, expected)
1346
+
1347
+ @pytest.mark.parametrize("dtype", (np.float64, object, "M8[ns]"))
1348
+ def test_value_counts_normalized(self, dtype):
1349
+ # GH12558
1350
+ s = Series([1] * 2 + [2] * 3 + [np.nan] * 5)
1351
+ s_typed = s.astype(dtype)
1352
+ result = s_typed.value_counts(normalize=True, dropna=False)
1353
+ expected = Series(
1354
+ [0.5, 0.3, 0.2],
1355
+ index=Series([np.nan, 2.0, 1.0], dtype=dtype),
1356
+ name="proportion",
1357
+ )
1358
+ tm.assert_series_equal(result, expected)
1359
+
1360
+ result = s_typed.value_counts(normalize=True, dropna=True)
1361
+ expected = Series(
1362
+ [0.6, 0.4], index=Series([2.0, 1.0], dtype=dtype), name="proportion"
1363
+ )
1364
+ tm.assert_series_equal(result, expected)
1365
+
1366
+ def test_value_counts_uint64(self):
1367
+ arr = np.array([2**63], dtype=np.uint64)
1368
+ expected = Series([1], index=[2**63], name="count")
1369
+ result = algos.value_counts(arr)
1370
+
1371
+ tm.assert_series_equal(result, expected)
1372
+
1373
+ arr = np.array([-1, 2**63], dtype=object)
1374
+ expected = Series([1, 1], index=[-1, 2**63], name="count")
1375
+ result = algos.value_counts(arr)
1376
+
1377
+ tm.assert_series_equal(result, expected)
1378
+
1379
+
1380
class TestDuplicated:
    """Tests for algos.duplicated and Index/Series .duplicated, plus a few
    pd.unique cases on tuples and complex numbers."""

    def test_duplicated_with_nas(self):
        # NaN entries must participate in duplicate detection like any value
        keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)

        result = algos.duplicated(keys)
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        # keep="first" is the default, so this matches the call above
        result = algos.duplicated(keys, keep="first")
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated(keys, keep="last")
        expected = np.array([True, False, True, False, False, False])
        tm.assert_numpy_array_equal(result, expected)

        # keep=False marks every member of a duplicated group
        result = algos.duplicated(keys, keep=False)
        expected = np.array([True, False, True, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        # tuples containing NaN: each of the 4 distinct pairs appears twice
        keys = np.empty(8, dtype=object)
        for i, t in enumerate(
            zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2)
        ):
            keys[i] = t

        result = algos.duplicated(keys)
        falses = [False] * 4
        trues = [True] * 4
        expected = np.array(falses + trues)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated(keys, keep="last")
        expected = np.array(trues + falses)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated(keys, keep=False)
        expected = np.array(trues + trues)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "case",
        [
            np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]),
            np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]),
            np.array(
                [
                    1 + 1j,
                    2 + 2j,
                    1 + 1j,
                    5 + 5j,
                    3 + 3j,
                    2 + 2j,
                    4 + 4j,
                    1 + 1j,
                    5 + 5j,
                    6 + 6j,
                ]
            ),
            np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object),
            np.array(
                [1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64
            ),
        ],
    )
    def test_numeric_object_likes(self, case):
        # every parametrized array follows the same positional duplication
        # pattern, so the expected masks are shared across all dtypes
        exp_first = np.array(
            [False, False, True, False, False, True, False, True, True, False]
        )
        exp_last = np.array(
            [True, True, True, True, False, False, False, False, False, False]
        )
        exp_false = exp_first | exp_last

        res_first = algos.duplicated(case, keep="first")
        tm.assert_numpy_array_equal(res_first, exp_first)

        res_last = algos.duplicated(case, keep="last")
        tm.assert_numpy_array_equal(res_last, exp_last)

        res_false = algos.duplicated(case, keep=False)
        tm.assert_numpy_array_equal(res_false, exp_false)

        # index: plain and categorical must agree with the raw algorithm
        for idx in [Index(case), Index(case, dtype="category")]:
            res_first = idx.duplicated(keep="first")
            tm.assert_numpy_array_equal(res_first, exp_first)

            res_last = idx.duplicated(keep="last")
            tm.assert_numpy_array_equal(res_last, exp_last)

            res_false = idx.duplicated(keep=False)
            tm.assert_numpy_array_equal(res_false, exp_false)

        # series: plain and categorical
        for s in [Series(case), Series(case, dtype="category")]:
            res_first = s.duplicated(keep="first")
            tm.assert_series_equal(res_first, Series(exp_first))

            res_last = s.duplicated(keep="last")
            tm.assert_series_equal(res_last, Series(exp_last))

            res_false = s.duplicated(keep=False)
            tm.assert_series_equal(res_false, Series(exp_false))

    def test_datetime_likes(self):
        # same positional duplication pattern as above, expressed with
        # datetime-like values (including NaT) across several box types
        dt = [
            "2011-01-01",
            "2011-01-02",
            "2011-01-01",
            "NaT",
            "2011-01-03",
            "2011-01-02",
            "2011-01-04",
            "2011-01-01",
            "NaT",
            "2011-01-06",
        ]
        td = [
            "1 days",
            "2 days",
            "1 days",
            "NaT",
            "3 days",
            "2 days",
            "4 days",
            "1 days",
            "NaT",
            "6 days",
        ]

        cases = [
            np.array([Timestamp(d) for d in dt]),
            np.array([Timestamp(d, tz="US/Eastern") for d in dt]),
            np.array([Period(d, freq="D") for d in dt]),
            np.array([np.datetime64(d) for d in dt]),
            np.array([Timedelta(d) for d in td]),
        ]

        exp_first = np.array(
            [False, False, True, False, False, True, False, True, True, False]
        )
        exp_last = np.array(
            [True, True, True, True, False, False, False, False, False, False]
        )
        exp_false = exp_first | exp_last

        for case in cases:
            res_first = algos.duplicated(case, keep="first")
            tm.assert_numpy_array_equal(res_first, exp_first)

            res_last = algos.duplicated(case, keep="last")
            tm.assert_numpy_array_equal(res_last, exp_last)

            res_false = algos.duplicated(case, keep=False)
            tm.assert_numpy_array_equal(res_false, exp_false)

            # index: native dtype, categorical, and object boxing
            for idx in [
                Index(case),
                Index(case, dtype="category"),
                Index(case, dtype=object),
            ]:
                res_first = idx.duplicated(keep="first")
                tm.assert_numpy_array_equal(res_first, exp_first)

                res_last = idx.duplicated(keep="last")
                tm.assert_numpy_array_equal(res_last, exp_last)

                res_false = idx.duplicated(keep=False)
                tm.assert_numpy_array_equal(res_false, exp_false)

            # series: native dtype, categorical, and object boxing
            for s in [
                Series(case),
                Series(case, dtype="category"),
                Series(case, dtype=object),
            ]:
                res_first = s.duplicated(keep="first")
                tm.assert_series_equal(res_first, Series(exp_first))

                res_last = s.duplicated(keep="last")
                tm.assert_series_equal(res_last, Series(exp_last))

                res_false = s.duplicated(keep=False)
                tm.assert_series_equal(res_false, Series(exp_false))

    @pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)])
    def test_unique_index(self, case):
        # a unique index reports no duplicates at all
        assert case.is_unique is True
        tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False]))

    @pytest.mark.parametrize(
        "arr, uniques",
        [
            (
                [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
                [(0, 0), (0, 1), (1, 0), (1, 1)],
            ),
            (
                [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")],
                [("b", "c"), ("a", "b")],
            ),
            ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]),
        ],
    )
    def test_unique_tuples(self, arr, uniques):
        # https://github.com/pandas-dev/pandas/issues/16519
        # pd.unique on a list of tuples keeps first-occurrence order and
        # returns an object-dtype array of tuples
        expected = np.empty(len(uniques), dtype=object)
        expected[:] = uniques

        result = pd.unique(arr)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "array,expected",
        [
            (
                [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j],
                # Should return a complex dtype in the future
                np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object),
            )
        ],
    )
    def test_unique_complex_numbers(self, array, expected):
        # GH 17927
        result = pd.unique(array)
        tm.assert_numpy_array_equal(result, expected)
1608
+
1609
+
1610
class TestHashTable:
    """Tests for the cython hash tables' unique/factorize against the
    pandas-level drop_duplicates reference implementation."""

    @pytest.mark.parametrize(
        "htable, tm_dtype",
        [
            (ht.PyObjectHashTable, "String"),
            (ht.StringHashTable, "String"),
            (ht.Float64HashTable, "Float"),
            (ht.Int64HashTable, "Int"),
            (ht.UInt64HashTable, "UInt"),
        ],
    )
    def test_hashtable_unique(self, htable, tm_dtype, writable):
        # output of maker has guaranteed unique elements
        maker = getattr(tm, "make" + tm_dtype + "Index")
        s = Series(maker(1000))
        if htable == ht.Float64HashTable:
            # add NaN for float column
            s.loc[500] = np.nan
        elif htable == ht.PyObjectHashTable:
            # use different NaN types for object column
            s.loc[500:502] = [np.nan, None, NaT]

        # create duplicated selection
        s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
        # `writable` fixture exercises both read-only and writable buffers
        s_duplicated.values.setflags(write=writable)

        # drop_duplicates has own cython code (hash_table_func_helper.pxi)
        # and is tested separately; keeps first occurrence like ht.unique()
        expected_unique = s_duplicated.drop_duplicates(keep="first").values
        result_unique = htable().unique(s_duplicated.values)
        tm.assert_numpy_array_equal(result_unique, expected_unique)

        # test return_inverse=True
        # reconstruction can only succeed if the inverse is correct
        result_unique, result_inverse = htable().unique(
            s_duplicated.values, return_inverse=True
        )
        tm.assert_numpy_array_equal(result_unique, expected_unique)
        reconstr = result_unique[result_inverse]
        tm.assert_numpy_array_equal(reconstr, s_duplicated.values)

    @pytest.mark.parametrize(
        "htable, tm_dtype",
        [
            (ht.PyObjectHashTable, "String"),
            (ht.StringHashTable, "String"),
            (ht.Float64HashTable, "Float"),
            (ht.Int64HashTable, "Int"),
            (ht.UInt64HashTable, "UInt"),
        ],
    )
    def test_hashtable_factorize(self, htable, tm_dtype, writable):
        # output of maker has guaranteed unique elements
        maker = getattr(tm, "make" + tm_dtype + "Index")
        s = Series(maker(1000))
        if htable == ht.Float64HashTable:
            # add NaN for float column
            s.loc[500] = np.nan
        elif htable == ht.PyObjectHashTable:
            # use different NaN types for object column
            s.loc[500:502] = [np.nan, None, NaT]

        # create duplicated selection
        s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
        s_duplicated.values.setflags(write=writable)
        na_mask = s_duplicated.isna().values

        result_unique, result_inverse = htable().factorize(s_duplicated.values)

        # drop_duplicates has own cython code (hash_table_func_helper.pxi)
        # and is tested separately; keeps first occurrence like ht.factorize()
        # since factorize removes all NaNs, we do the same here
        expected_unique = s_duplicated.dropna().drop_duplicates().values
        tm.assert_numpy_array_equal(result_unique, expected_unique)

        # reconstruction can only succeed if the inverse is correct. Since
        # factorize removes the NaNs, those have to be excluded here as well
        result_reconstruct = result_unique[result_inverse[~na_mask]]
        expected_reconstruct = s_duplicated.dropna().values
        tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct)
1690
+
1691
+
1692
class TestRank:
    """Tests for algos.rank / libalgos.rank_1d."""

    @td.skip_if_no_scipy
    @pytest.mark.parametrize(
        "arr",
        [
            [np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan],
            [4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan],
        ],
    )
    def test_scipy_compat(self, arr):
        # rank_1d should agree with scipy.stats.rankdata; pandas leaves NaN
        # positions as NaN, which scipy has no native notion of, so NaNs are
        # mapped to +inf for scipy and masked back out of its result
        from scipy.stats import rankdata

        arr = np.array(arr)

        mask = ~np.isfinite(arr)
        arr = arr.copy()
        result = libalgos.rank_1d(arr)
        arr[mask] = np.inf
        exp = rankdata(arr)
        exp[mask] = np.nan
        tm.assert_almost_equal(result, exp)

    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
    def test_basic(self, writable, dtype):
        # ranks are returned as float64 regardless of the input integer dtype
        exp = np.array([1, 2], dtype=np.float64)

        data = np.array([1, 100], dtype=dtype)
        # `writable` fixture exercises both read-only and writable buffers
        data.setflags(write=writable)
        ser = Series(data)
        result = algos.rank(ser)
        tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize("dtype", [np.float64, np.uint64])
    def test_uint64_overflow(self, dtype):
        # 2**63 must rank correctly and not overflow int64
        exp = np.array([1, 2], dtype=np.float64)

        s = Series([1, 2**63], dtype=dtype)
        tm.assert_numpy_array_equal(algos.rank(s), exp)

    def test_too_many_ndims(self):
        # rank supports at most 2-dimensional input
        arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
        msg = "Array with ndim > 2 are not supported"

        with pytest.raises(TypeError, match=msg):
            algos.rank(arr)

    @pytest.mark.single_cpu
    def test_pct_max_many_rows(self):
        # GH 18271: pct=True must reach exactly 1.0 even for > 2**24 rows
        values = np.arange(2**24 + 1)
        result = algos.rank(values, pct=True).max()
        assert result == 1

        values = np.arange(2**25 + 2).reshape(2**24 + 1, 2)
        result = algos.rank(values, pct=True).max()
        assert result == 1
1748
+
1749
+
1750
def test_pad_backfill_object_segfault():
    """Regression test: pad/backfill indexers on empty object arrays must
    not segfault, and must return the correct (possibly empty) indexer."""
    empty = np.array([], dtype="O")
    nonempty = np.array([datetime(2010, 12, 31)], dtype="O")

    for fill_func in (libalgos.pad["object"], libalgos.backfill["object"]):
        # empty "old" labels: the single new label has no match -> -1
        tm.assert_numpy_array_equal(
            fill_func(empty, nonempty), np.array([-1], dtype=np.intp)
        )
        # empty "new" labels: indexer is empty
        tm.assert_numpy_array_equal(
            fill_func(nonempty, empty), np.array([], dtype=np.intp)
        )
1769
+
1770
+
1771
class TestTseriesUtil:
    """Checks for the low-level int64 pad/backfill indexer helpers."""

    def test_backfill(self):
        # each new label maps to the position of the next old label >= it
        filler = libalgos.backfill["int64_t"](
            Index([1, 5, 10]).values, Index(list(range(12))).values
        )
        expect_filler = np.array(
            [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.intp
        )
        tm.assert_numpy_array_equal(filler, expect_filler)

        # corner case: every new label lies past the last old label -> all -1
        filler = libalgos.backfill["int64_t"](
            Index([1, 4]).values, Index(list(range(5, 10))).values
        )
        tm.assert_numpy_array_equal(filler, np.array([-1] * 5, dtype=np.intp))

    def test_pad(self):
        # each new label maps to the position of the last old label <= it
        filler = libalgos.pad["int64_t"](
            Index([1, 5, 10]).values, Index(list(range(12))).values
        )
        expect_filler = np.array(
            [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.intp
        )
        tm.assert_numpy_array_equal(filler, expect_filler)

        # corner case: every new label precedes the first old label -> all -1
        filler = libalgos.pad["int64_t"](
            Index([5, 10]).values, Index(np.arange(5, dtype=np.int64)).values
        )
        tm.assert_numpy_array_equal(filler, np.array([-1] * 5, dtype=np.intp))
1804
+
1805
+
1806
def test_is_lexsorted():
    # GH 13445
    # The first label array is [3]*31 + [2]*31 + [1]*31 + [0]*31 (descending
    # blocks) and the second cycles 30..0 four times; the pair is therefore
    # NOT lexsorted and is_lexsorted must report False.
    failure = [
        np.repeat(np.arange(3, -1, -1), 31).astype("int64"),
        np.tile(np.arange(30, -1, -1), 4).astype("int64"),
    ]

    assert not libalgos.is_lexsorted(failure)
2069
+
2070
+
2071
def test_groupsort_indexer():
    """groupsort_indexer must behave like a stable argsort returning intp."""
    a = np.random.randint(0, 1000, 100).astype(np.intp)
    b = np.random.randint(0, 1000, 100).astype(np.intp)

    # compare against mergesort (the stable np.argsort kind); np.argsort may
    # return plain int while groupsort_indexer always returns intp
    indexer = libalgos.groupsort_indexer(a, 1000)[0]
    reference = np.argsort(a, kind="mergesort").astype(np.intp)
    tm.assert_numpy_array_equal(indexer, reference)

    # compare with np.lexsort via a combined single key; lexsort also
    # returns int rather than intp
    combined = a * 1000 + b
    indexer = libalgos.groupsort_indexer(combined, 1000000)[0]
    reference = np.lexsort((b, a)).astype(np.intp)
    tm.assert_numpy_array_equal(indexer, reference)
2094
+
2095
+
2096
def test_infinity_sort():
    # GH 13445
    # numpy's argsort can be unhappy if something is less than itself;
    # Infinity/NegInfinity get a self-consistent ordering strictly outside
    # the float extended real line.
    pos = libalgos.Infinity()
    neg = libalgos.NegInfinity()

    ref_nums = [neg, float("-inf"), -1e100, 0, 1e100, float("inf"), pos]

    # symmetric checks: Infinity dominates everything from above,
    # NegInfinity from below
    for extreme, fresh, weak, strict in (
        (pos, libalgos.Infinity, lambda a, b: a >= b, lambda a, b: a > b),
        (neg, libalgos.NegInfinity, lambda a, b: a <= b, lambda a, b: a < b),
    ):
        assert all(weak(extreme, x) for x in ref_nums)
        assert all(strict(extreme, x) or x is extreme for x in ref_nums)
        assert weak(extreme, extreme) and extreme == extreme
        assert not (extreme < extreme) and not (extreme > extreme)
        # distinct instances still compare equal
        assert fresh() == fresh()
        assert not fresh() != fresh()

    # every permutation must sort back into the reference order
    for perm in permutations(ref_nums):
        assert sorted(perm) == ref_nums

    # smoke tests: argsort over arrays of identical extremes must not crash
    np.array([libalgos.Infinity()] * 32).argsort()
    np.array([libalgos.NegInfinity()] * 32).argsort()
2127
+
2128
+
2129
def test_infinity_against_nan():
    """Every ordered comparison of Infinity/NegInfinity with NaN is False;
    only != is True (NaN compares unequal to everything)."""
    for extreme in (libalgos.Infinity(), libalgos.NegInfinity()):
        assert not extreme > np.nan
        assert not extreme >= np.nan
        assert not extreme < np.nan
        assert not extreme <= np.nan
        assert not extreme == np.nan
        assert extreme != np.nan
2146
+
2147
+
2148
def test_ensure_platform_int():
    """An array that is already intp must be passed through by identity
    (no copy)."""
    arr = np.arange(100, dtype=np.intp)
    assert libalgos.ensure_platform_int(arr) is arr
2153
+
2154
+
2155
def test_int64_add_overflow():
    # see gh-14068
    """checked_add_with_arr must raise OverflowError on any int64 overflow
    unless the overflowing positions are masked out."""
    msg = "Overflow in int64 addition"
    m = np.iinfo(np.int64).max
    n = np.iinfo(np.int64).min

    # (left, right, kwargs) combinations that must all overflow
    overflowing = [
        (np.array([m, m]), m, {}),
        (np.array([m, m]), np.array([m, m]), {}),
        (np.array([n, n]), n, {}),
        (np.array([n, n]), np.array([n, n]), {}),
        (np.array([m, n]), np.array([n, n]), {}),
        (np.array([m, m]), np.array([m, m]), {"arr_mask": np.array([False, True])}),
        (np.array([m, m]), np.array([m, m]), {"b_mask": np.array([False, True])}),
        (
            np.array([m, m]),
            np.array([m, m]),
            {
                "arr_mask": np.array([False, True]),
                "b_mask": np.array([False, True]),
            },
        ),
        (np.array([m, m]), np.array([np.nan, m]), {}),
    ]
    for left, right, kwargs in overflowing:
        with pytest.raises(OverflowError, match=msg):
            algos.checked_add_with_arr(left, right, **kwargs)

    # Check that the nan boolean arrays override whether or not
    # the addition overflows. We don't check the result but just
    # the fact that an OverflowError is not raised.
    algos.checked_add_with_arr(
        np.array([m, m]), np.array([m, m]), arr_mask=np.array([True, True])
    )
    algos.checked_add_with_arr(
        np.array([m, m]), np.array([m, m]), b_mask=np.array([True, True])
    )
    algos.checked_add_with_arr(
        np.array([m, m]),
        np.array([m, m]),
        arr_mask=np.array([True, False]),
        b_mask=np.array([False, True]),
    )
2204
+
2205
+
2206
class TestMode:
    """Tests for algos.mode and Series.mode across dtypes."""

    def test_no_mode(self):
        # empty input -> empty float64 result
        exp = Series([], dtype=np.float64, index=Index([], dtype=int))
        tm.assert_numpy_array_equal(algos.mode([]), exp.values)

    @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"])
    def test_mode_single(self, dt):
        # GH 15714
        exp_single = [1]
        data_single = [1]

        exp_multi = [1]
        data_multi = [1, 1]

        ser = Series(data_single, dtype=dt)
        exp = Series(exp_single, dtype=dt)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

        ser = Series(data_multi, dtype=dt)
        exp = Series(exp_multi, dtype=dt)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_mode_obj_int(self):
        exp = Series([1], dtype=int)
        tm.assert_numpy_array_equal(algos.mode([1]), exp.values)

        # all-unique object input: every value is a mode, sorted
        exp = Series(["a", "b", "c"], dtype=object)
        tm.assert_numpy_array_equal(algos.mode(["a", "b", "c"]), exp.values)

    @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"])
    def test_number_mode(self, dt):
        exp_single = [1]
        data_single = [1] * 5 + [2] * 3

        # ties (1 and 3 both occur 5 times) -> both returned, sorted
        exp_multi = [1, 3]
        data_multi = [1] * 5 + [2] * 3 + [3] * 5

        ser = Series(data_single, dtype=dt)
        exp = Series(exp_single, dtype=dt)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

        ser = Series(data_multi, dtype=dt)
        exp = Series(exp_multi, dtype=dt)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_strobj_mode(self):
        exp = ["b"]
        data = ["a"] * 2 + ["b"] * 3

        ser = Series(data, dtype="c")
        exp = Series(exp, dtype="c")
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

    @pytest.mark.parametrize("dt", [str, object])
    def test_strobj_multi_char(self, dt):
        exp = ["bar"]
        data = ["foo"] * 2 + ["bar"] * 3

        ser = Series(data, dtype=dt)
        exp = Series(exp, dtype=dt)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_datelike_mode(self):
        # all-unique -> all values are modes, returned sorted
        exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]")
        ser = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]")
        tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
        tm.assert_series_equal(ser.mode(), exp)

        exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]")
        ser = Series(
            ["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"],
            dtype="M8[ns]",
        )
        tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_timedelta_mode(self):
        exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]")
        ser = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]")
        tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
        tm.assert_series_equal(ser.mode(), exp)

        exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]")
        ser = Series(
            ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],
            dtype="timedelta64[ns]",
        )
        tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_mixed_dtype(self):
        exp = Series(["foo"])
        ser = Series([1, "foo", "foo"])
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_uint64_overflow(self):
        # 2**63 must be countable without overflowing into int64
        exp = Series([2**63], dtype=np.uint64)
        ser = Series([1, 2**63, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

        exp = Series([1, 2**63], dtype=np.uint64)
        ser = Series([1, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
        tm.assert_series_equal(ser.mode(), exp)

    def test_categorical(self):
        # categorical mode preserves the full category set
        c = Categorical([1, 2])
        exp = c
        res = Series(c).mode()._values
        tm.assert_categorical_equal(res, exp)

        c = Categorical([1, "a", "a"])
        exp = Categorical(["a"], categories=[1, "a"])
        res = Series(c).mode()._values
        tm.assert_categorical_equal(res, exp)

        c = Categorical([1, 1, 2, 3, 3])
        exp = Categorical([1, 3], categories=[1, 2, 3])
        res = Series(c).mode()._values
        tm.assert_categorical_equal(res, exp)

    def test_index(self):
        idx = Index([1, 2, 3])
        exp = Series([1, 2, 3], dtype=np.int64)
        tm.assert_numpy_array_equal(algos.mode(idx), exp.values)

        idx = Index([1, "a", "a"])
        exp = Series(["a"], dtype=object)
        tm.assert_numpy_array_equal(algos.mode(idx), exp.values)

        idx = Index([1, 1, 2, 3, 3])
        exp = Series([1, 3], dtype=np.int64)
        tm.assert_numpy_array_equal(algos.mode(idx), exp.values)

        idx = Index(
            ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],
            dtype="timedelta64[ns]",
        )
        with pytest.raises(AttributeError, match="TimedeltaIndex"):
            # algos.mode expects Arraylike, does *not* unwrap TimedeltaIndex
            algos.mode(idx)

    def test_ser_mode_with_name(self):
        # GH 46737: the Series name must survive .mode()
        ser = Series([1, 1, 3], name="foo")
        result = ser.mode()
        expected = Series([1], name="foo")
        tm.assert_series_equal(result, expected)
2362
+
2363
+
2364
class TestDiff:
    """Tests for algos.diff on datetime-like and low-precision int input."""

    @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
    def test_diff_datetimelike_nat(self, dtype):
        # NaT - NaT is NaT, not 0
        arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4)
        arr[:, 2] = arr.dtype.type("NaT", "ns")
        result = algos.diff(arr, 1, axis=0)

        # row-to-row step is 4 (ns); the NaT column and the first (shifted-in)
        # row must both be NaT
        expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4
        expected[:, 2] = np.timedelta64("NaT", "ns")
        expected[0, :] = np.timedelta64("NaT", "ns")

        tm.assert_numpy_array_equal(result, expected)

        # same diff along axis=1 on the transpose must give the transpose
        result = algos.diff(arr.T, 1, axis=1)
        tm.assert_numpy_array_equal(result, expected.T)

    def test_diff_ea_axis(self):
        # ExtensionArrays are 1D, so axis=1 is rejected
        dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data

        msg = "cannot diff DatetimeArray on axis=1"
        with pytest.raises(ValueError, match=msg):
            algos.diff(dta, 1, axis=1)

    @pytest.mark.parametrize("dtype", ["int8", "int16"])
    def test_diff_low_precision_int(self, dtype):
        # small int dtypes diff to float32 (to hold the leading NaN)
        arr = np.array([0, 1, 1, 0, 0], dtype=dtype)
        result = algos.diff(arr, 1)
        expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32")
        tm.assert_numpy_array_equal(result, expected)
2394
+
2395
+
2396
@pytest.mark.parametrize("op", [np.array, pd.array])
def test_union_with_duplicates(op):
    # GH#36289: the union keeps each value with its maximum multiplicity
    lvals = op([3, 1, 3, 4])
    rvals = op([2, 3, 1, 1])
    expected = op([3, 3, 1, 1, 4, 2])

    result = algos.union_with_duplicates(lvals, rvals)
    if isinstance(expected, np.ndarray):
        tm.assert_numpy_array_equal(result, expected)
    else:
        tm.assert_extension_array_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/test_downstream.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Testing that we work in the downstream packages
3
+ """
4
+ import importlib
5
+ import subprocess
6
+ import sys
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas.errors import IntCastingNaNError
12
+ import pandas.util._test_decorators as td
13
+
14
+ import pandas as pd
15
+ from pandas import (
16
+ DataFrame,
17
+ Series,
18
+ )
19
+ import pandas._testing as tm
20
+
21
+
22
def import_module(name):
    """Import and return module *name*, skipping the calling test if absent.

    Only ModuleNotFoundError triggers a skip: an ImportError caused by a
    pandas API change inside the downstream package should surface as a
    real failure, not a silent skip.
    """
    # we *only* want to skip if the module is truly not available
    # and NOT just an actual import error because of pandas changes

    try:
        return importlib.import_module(name)
    except ModuleNotFoundError:
        pytest.skip(f"skipping as {name} not available")
30
+
31
+
32
@pytest.fixture
def df():
    """Minimal single-column DataFrame shared by the downstream tests."""
    return DataFrame({"A": [1, 2, 3]})
35
+
36
+
37
def test_dask(df):
    """Smoke test: a pandas DataFrame round-trips through dask.dataframe."""
    # dask sets "compute.use_numexpr" to False, so catch the current value
    # and ensure to reset it afterwards to avoid impacting other tests
    olduse = pd.get_option("compute.use_numexpr")

    try:
        toolz = import_module("toolz")  # noqa:F841
        dask = import_module("dask")  # noqa:F841

        import dask.dataframe as dd

        ddf = dd.from_pandas(df, npartitions=3)
        # column access and materialization must both work
        assert ddf.A is not None
        assert ddf.compute() is not None
    finally:
        pd.set_option("compute.use_numexpr", olduse)
53
+
54
+
55
def test_dask_ufunc():
    """A numpy ufunc applied through dask.array matches plain numpy."""
    # dask sets "compute.use_numexpr" to False, so catch the current value
    # and ensure to reset it afterwards to avoid impacting other tests
    olduse = pd.get_option("compute.use_numexpr")

    try:
        dask = import_module("dask")  # noqa:F841
        import dask.array as da
        import dask.dataframe as dd

        s = Series([1.5, 2.3, 3.7, 4.0])
        ds = dd.from_pandas(s, npartitions=2)

        # da.fix over a dask-wrapped Series must equal np.fix on the original
        result = da.fix(ds).compute()
        expected = np.fix(s)
        tm.assert_series_equal(result, expected)
    finally:
        pd.set_option("compute.use_numexpr", olduse)
73
+
74
+
75
@td.skip_if_no("dask")
def test_construct_dask_float_array_int_dtype_match_ndarray():
    # GH#40110 make sure we treat a float-dtype dask array with the same
    # rules we would for an ndarray
    import dask.dataframe as dd

    arr = np.array([1, 2.5, 3])
    darr = dd.from_array(arr)

    # without a requested dtype, float data stays float
    res = Series(darr)
    expected = Series(arr)
    tm.assert_series_equal(res, expected)

    # GH#49599 in 2.0 we raise instead of silently ignoring the dtype
    msg = "Trying to coerce float values to integers"
    with pytest.raises(ValueError, match=msg):
        Series(darr, dtype="i8")

    msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
    arr[2] = np.nan
    with pytest.raises(IntCastingNaNError, match=msg):
        Series(darr, dtype="i8")
    # which is the same as we get with a numpy input
    with pytest.raises(IntCastingNaNError, match=msg):
        Series(arr, dtype="i8")
100
+
101
+
102
+ def test_xarray(df):
103
+ xarray = import_module("xarray") # noqa:F841
104
+
105
+ assert df.to_xarray() is not None
106
+
107
+
108
+ @td.skip_if_no("cftime")
109
+ @td.skip_if_no("xarray", "0.21.0")
110
+ def test_xarray_cftimeindex_nearest():
111
+ # https://github.com/pydata/xarray/issues/3751
112
+ import cftime
113
+ import xarray
114
+
115
+ times = xarray.cftime_range("0001", periods=2)
116
+ key = cftime.DatetimeGregorian(2000, 1, 1)
117
+ result = times.get_indexer([key], method="nearest")
118
+ expected = 1
119
+ assert result == expected
120
+
121
+
122
+ def test_oo_optimizable():
123
+ # GH 21071
124
+ subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
125
+
126
+
127
+ def test_oo_optimized_datetime_index_unpickle():
128
+ # GH 42866
129
+ subprocess.check_call(
130
+ [
131
+ sys.executable,
132
+ "-OO",
133
+ "-c",
134
+ (
135
+ "import pandas as pd, pickle; "
136
+ "pickle.loads(pickle.dumps(pd.date_range('2021-01-01', periods=1)))"
137
+ ),
138
+ ]
139
+ )
140
+
141
+
142
+ @pytest.mark.network
143
+ @tm.network
144
+ def test_statsmodels():
145
+ statsmodels = import_module("statsmodels") # noqa:F841
146
+ import statsmodels.api as sm
147
+ import statsmodels.formula.api as smf
148
+
149
+ df = sm.datasets.get_rdataset("Guerry", "HistData").data
150
+ smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit()
151
+
152
+
153
+ def test_scikit_learn():
154
+ sklearn = import_module("sklearn") # noqa:F841
155
+ from sklearn import (
156
+ datasets,
157
+ svm,
158
+ )
159
+
160
+ digits = datasets.load_digits()
161
+ clf = svm.SVC(gamma=0.001, C=100.0)
162
+ clf.fit(digits.data[:-1], digits.target[:-1])
163
+ clf.predict(digits.data[-1:])
164
+
165
+
166
+ @pytest.mark.network
167
+ @tm.network
168
+ def test_seaborn():
169
+ seaborn = import_module("seaborn")
170
+ tips = seaborn.load_dataset("tips")
171
+ seaborn.stripplot(x="day", y="total_bill", data=tips)
172
+
173
+
174
+ def test_pandas_gbq():
175
+ # Older versions import from non-public, non-existent pandas funcs
176
+ pytest.importorskip("pandas_gbq", minversion="0.10.0")
177
+ pandas_gbq = import_module("pandas_gbq") # noqa:F841
178
+
179
+
180
+ @pytest.mark.network
181
+ @tm.network
182
+ @pytest.mark.xfail(
183
+ raises=ValueError,
184
+ reason="The Quandl API key must be provided either through the api_key "
185
+ "variable or through the environmental variable QUANDL_API_KEY",
186
+ )
187
+ def test_pandas_datareader():
188
+ pandas_datareader = import_module("pandas_datareader")
189
+ pandas_datareader.DataReader("F", "quandl", "2017-01-01", "2017-02-01")
190
+
191
+
192
+ def test_pyarrow(df):
193
+ pyarrow = import_module("pyarrow")
194
+ table = pyarrow.Table.from_pandas(df)
195
+ result = table.to_pandas()
196
+ tm.assert_frame_equal(result, df)
197
+
198
+
199
+ def test_yaml_dump(df):
200
+ # GH#42748
201
+ yaml = import_module("yaml")
202
+
203
+ dumped = yaml.dump(df)
204
+
205
+ loaded = yaml.load(dumped, Loader=yaml.Loader)
206
+ tm.assert_frame_equal(df, loaded)
207
+
208
+ loaded2 = yaml.load(dumped, Loader=yaml.UnsafeLoader)
209
+ tm.assert_frame_equal(df, loaded2)
210
+
211
+
212
+ def test_missing_required_dependency():
213
+ # GH 23868
214
+ # To ensure proper isolation, we pass these flags
215
+ # -S : disable site-packages
216
+ # -s : disable user site-packages
217
+ # -E : disable PYTHON* env vars, especially PYTHONPATH
218
+ # https://github.com/MacPython/pandas-wheels/pull/50
219
+
220
+ pyexe = sys.executable.replace("\\", "/")
221
+
222
+ # We skip this test if pandas is installed as a site package. We first
223
+ # import the package normally and check the path to the module before
224
+ # executing the test which imports pandas with site packages disabled.
225
+ call = [pyexe, "-c", "import pandas;print(pandas.__file__)"]
226
+ output = subprocess.check_output(call).decode()
227
+ if "site-packages" in output:
228
+ pytest.skip("pandas installed as site package")
229
+
230
+ # This test will fail if pandas is installed as a site package. The flags
231
+ # prevent pandas being imported and the test will report Failed: DID NOT
232
+ # RAISE <class 'subprocess.CalledProcessError'>
233
+ call = [pyexe, "-sSE", "-c", "import pandas"]
234
+
235
+ msg = (
236
+ rf"Command '\['{pyexe}', '-sSE', '-c', 'import pandas'\]' "
237
+ "returned non-zero exit status 1."
238
+ )
239
+
240
+ with pytest.raises(subprocess.CalledProcessError, match=msg) as exc:
241
+ subprocess.check_output(call, stderr=subprocess.STDOUT)
242
+
243
+ output = exc.value.stdout.decode()
244
+ for name in ["numpy", "pytz", "dateutil"]:
245
+ assert name in output
246
+
247
+
248
+ def test_frame_setitem_dask_array_into_new_col():
249
+ # GH#47128
250
+
251
+ # dask sets "compute.use_numexpr" to False, so catch the current value
252
+ # and ensure to reset it afterwards to avoid impacting other tests
253
+ olduse = pd.get_option("compute.use_numexpr")
254
+
255
+ try:
256
+ dask = import_module("dask") # noqa:F841
257
+
258
+ import dask.array as da
259
+
260
+ dda = da.array([1, 2])
261
+ df = DataFrame({"a": ["a", "b"]})
262
+ df["b"] = dda
263
+ df["c"] = dda
264
+ df.loc[[False, True], "b"] = 100
265
+ result = df.loc[[1], :]
266
+ expected = DataFrame({"a": ["b"], "b": [100], "c": [2]}, index=[1])
267
+ tm.assert_frame_equal(result, expected)
268
+ finally:
269
+ pd.set_option("compute.use_numexpr", olduse)
videochat2/lib/python3.10/site-packages/pandas/tests/test_errors.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas.errors import (
4
+ AbstractMethodError,
5
+ UndefinedVariableError,
6
+ )
7
+
8
+ import pandas as pd
9
+
10
+
11
+ @pytest.mark.parametrize(
12
+ "exc",
13
+ [
14
+ "AttributeConflictWarning",
15
+ "CSSWarning",
16
+ "CategoricalConversionWarning",
17
+ "ClosedFileError",
18
+ "DataError",
19
+ "DatabaseError",
20
+ "DtypeWarning",
21
+ "EmptyDataError",
22
+ "IncompatibilityWarning",
23
+ "IndexingError",
24
+ "InvalidColumnName",
25
+ "InvalidComparison",
26
+ "InvalidVersion",
27
+ "LossySetitemError",
28
+ "MergeError",
29
+ "NoBufferPresent",
30
+ "NumExprClobberingError",
31
+ "NumbaUtilError",
32
+ "OptionError",
33
+ "OutOfBoundsDatetime",
34
+ "ParserError",
35
+ "ParserWarning",
36
+ "PerformanceWarning",
37
+ "PossibleDataLossError",
38
+ "PossiblePrecisionLoss",
39
+ "PyperclipException",
40
+ "SettingWithCopyError",
41
+ "SettingWithCopyWarning",
42
+ "SpecificationError",
43
+ "UnsortedIndexError",
44
+ "UnsupportedFunctionCall",
45
+ "ValueLabelTypeMismatch",
46
+ ],
47
+ )
48
+ def test_exception_importable(exc):
49
+ from pandas import errors
50
+
51
+ err = getattr(errors, exc)
52
+ assert err is not None
53
+
54
+ # check that we can raise on them
55
+
56
+ msg = "^$"
57
+
58
+ with pytest.raises(err, match=msg):
59
+ raise err()
60
+
61
+
62
+ def test_catch_oob():
63
+ from pandas import errors
64
+
65
+ msg = "Cannot cast 1500-01-01 00:00:00 to unit='ns' without overflow"
66
+ with pytest.raises(errors.OutOfBoundsDatetime, match=msg):
67
+ pd.Timestamp("15000101").as_unit("ns")
68
+
69
+
70
+ @pytest.mark.parametrize(
71
+ "is_local",
72
+ [
73
+ True,
74
+ False,
75
+ ],
76
+ )
77
+ def test_catch_undefined_variable_error(is_local):
78
+ variable_name = "x"
79
+ if is_local:
80
+ msg = f"local variable '{variable_name}' is not defined"
81
+ else:
82
+ msg = f"name '{variable_name}' is not defined"
83
+
84
+ with pytest.raises(UndefinedVariableError, match=msg):
85
+ raise UndefinedVariableError(variable_name, is_local)
86
+
87
+
88
+ class Foo:
89
+ @classmethod
90
+ def classmethod(cls):
91
+ raise AbstractMethodError(cls, methodtype="classmethod")
92
+
93
+ @property
94
+ def property(self):
95
+ raise AbstractMethodError(self, methodtype="property")
96
+
97
+ def method(self):
98
+ raise AbstractMethodError(self)
99
+
100
+
101
+ def test_AbstractMethodError_classmethod():
102
+ xpr = "This classmethod must be defined in the concrete class Foo"
103
+ with pytest.raises(AbstractMethodError, match=xpr):
104
+ Foo.classmethod()
105
+
106
+ xpr = "This property must be defined in the concrete class Foo"
107
+ with pytest.raises(AbstractMethodError, match=xpr):
108
+ Foo().property
109
+
110
+ xpr = "This method must be defined in the concrete class Foo"
111
+ with pytest.raises(AbstractMethodError, match=xpr):
112
+ Foo().method()
videochat2/lib/python3.10/site-packages/pandas/tests/test_expressions.py ADDED
@@ -0,0 +1,451 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+ import re
3
+ import warnings
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas import option_context
9
+ import pandas._testing as tm
10
+ from pandas.core.api import (
11
+ DataFrame,
12
+ Index,
13
+ Series,
14
+ )
15
+ from pandas.core.computation import expressions as expr
16
+
17
+
18
+ @pytest.fixture
19
+ def _frame():
20
+ return DataFrame(np.random.randn(10001, 4), columns=list("ABCD"), dtype="float64")
21
+
22
+
23
+ @pytest.fixture
24
+ def _frame2():
25
+ return DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64")
26
+
27
+
28
+ @pytest.fixture
29
+ def _mixed(_frame):
30
+ return DataFrame(
31
+ {
32
+ "A": _frame["A"].copy(),
33
+ "B": _frame["B"].astype("float32"),
34
+ "C": _frame["C"].astype("int64"),
35
+ "D": _frame["D"].astype("int32"),
36
+ }
37
+ )
38
+
39
+
40
+ @pytest.fixture
41
+ def _mixed2(_frame2):
42
+ return DataFrame(
43
+ {
44
+ "A": _frame2["A"].copy(),
45
+ "B": _frame2["B"].astype("float32"),
46
+ "C": _frame2["C"].astype("int64"),
47
+ "D": _frame2["D"].astype("int32"),
48
+ }
49
+ )
50
+
51
+
52
+ @pytest.fixture
53
+ def _integer():
54
+ return DataFrame(
55
+ np.random.randint(1, 100, size=(10001, 4)), columns=list("ABCD"), dtype="int64"
56
+ )
57
+
58
+
59
+ @pytest.fixture
60
+ def _integer_randint(_integer):
61
+ # randint to get a case with zeros
62
+ return _integer * np.random.randint(0, 2, size=np.shape(_integer))
63
+
64
+
65
+ @pytest.fixture
66
+ def _integer2():
67
+ return DataFrame(
68
+ np.random.randint(1, 100, size=(101, 4)), columns=list("ABCD"), dtype="int64"
69
+ )
70
+
71
+
72
+ @pytest.fixture
73
+ def _array(_frame):
74
+ return _frame["A"].values.copy()
75
+
76
+
77
+ @pytest.fixture
78
+ def _array2(_frame2):
79
+ return _frame2["A"].values.copy()
80
+
81
+
82
+ @pytest.fixture
83
+ def _array_mixed(_mixed):
84
+ return _mixed["D"].values.copy()
85
+
86
+
87
+ @pytest.fixture
88
+ def _array_mixed2(_mixed2):
89
+ return _mixed2["D"].values.copy()
90
+
91
+
92
+ @pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr")
93
+ class TestExpressions:
94
+ @pytest.fixture(autouse=True)
95
+ def save_min_elements(self):
96
+ min_elements = expr._MIN_ELEMENTS
97
+ yield
98
+ expr._MIN_ELEMENTS = min_elements
99
+
100
+ @staticmethod
101
+ def call_op(df, other, flex: bool, opname: str):
102
+ if flex:
103
+ op = lambda x, y: getattr(x, opname)(y)
104
+ op.__name__ = opname
105
+ else:
106
+ op = getattr(operator, opname)
107
+
108
+ with option_context("compute.use_numexpr", False):
109
+ expected = op(df, other)
110
+
111
+ expr.get_test_result()
112
+
113
+ result = op(df, other)
114
+ return result, expected
115
+
116
+ @pytest.mark.parametrize(
117
+ "fixture",
118
+ [
119
+ "_integer",
120
+ "_integer2",
121
+ "_integer_randint",
122
+ "_frame",
123
+ "_frame2",
124
+ "_mixed",
125
+ "_mixed2",
126
+ ],
127
+ )
128
+ @pytest.mark.parametrize("flex", [True, False])
129
+ @pytest.mark.parametrize(
130
+ "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"]
131
+ )
132
+ def test_run_arithmetic(self, request, fixture, flex, arith):
133
+ df = request.getfixturevalue(fixture)
134
+ expr._MIN_ELEMENTS = 0
135
+ result, expected = self.call_op(df, df, flex, arith)
136
+
137
+ if arith == "truediv":
138
+ assert all(x.kind == "f" for x in expected.dtypes.values)
139
+ tm.assert_equal(expected, result)
140
+
141
+ for i in range(len(df.columns)):
142
+ result, expected = self.call_op(df.iloc[:, i], df.iloc[:, i], flex, arith)
143
+ if arith == "truediv":
144
+ assert expected.dtype.kind == "f"
145
+ tm.assert_equal(expected, result)
146
+
147
+ @pytest.mark.parametrize(
148
+ "fixture",
149
+ [
150
+ "_integer",
151
+ "_integer2",
152
+ "_integer_randint",
153
+ "_frame",
154
+ "_frame2",
155
+ "_mixed",
156
+ "_mixed2",
157
+ ],
158
+ )
159
+ @pytest.mark.parametrize("flex", [True, False])
160
+ def test_run_binary(self, request, fixture, flex, comparison_op):
161
+ """
162
+ tests solely that the result is the same whether or not numexpr is
163
+ enabled. Need to test whether the function does the correct thing
164
+ elsewhere.
165
+ """
166
+ df = request.getfixturevalue(fixture)
167
+ arith = comparison_op.__name__
168
+ with option_context("compute.use_numexpr", False):
169
+ other = df.copy() + 1
170
+
171
+ expr._MIN_ELEMENTS = 0
172
+ expr.set_test_mode(True)
173
+
174
+ result, expected = self.call_op(df, other, flex, arith)
175
+
176
+ used_numexpr = expr.get_test_result()
177
+ assert used_numexpr, "Did not use numexpr as expected."
178
+ tm.assert_equal(expected, result)
179
+
180
+ # FIXME: dont leave commented-out
181
+ # series doesn't uses vec_compare instead of numexpr...
182
+ # for i in range(len(df.columns)):
183
+ # binary_comp = other.iloc[:, i] + 1
184
+ # self.run_binary(df.iloc[:, i], binary_comp, flex)
185
+
186
+ def test_invalid(self):
187
+ array = np.random.randn(1_000_001)
188
+ array2 = np.random.randn(100)
189
+
190
+ # no op
191
+ result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate")
192
+ assert not result
193
+
194
+ # min elements
195
+ result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate")
196
+ assert not result
197
+
198
+ # ok, we only check on first part of expression
199
+ result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate")
200
+ assert result
201
+
202
+ @pytest.mark.filterwarnings(
203
+ "ignore:invalid value encountered in true_divide:RuntimeWarning"
204
+ )
205
+ @pytest.mark.parametrize(
206
+ "opname,op_str",
207
+ [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],
208
+ )
209
+ @pytest.mark.parametrize(
210
+ "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]
211
+ )
212
+ def test_binary_ops(self, request, opname, op_str, left_fix, right_fix):
213
+ left = request.getfixturevalue(left_fix)
214
+ right = request.getfixturevalue(right_fix)
215
+
216
+ def testit():
217
+ if opname == "pow":
218
+ # TODO: get this working
219
+ return
220
+
221
+ op = getattr(operator, opname)
222
+
223
+ with warnings.catch_warnings():
224
+ # array has 0s
225
+ msg = "invalid value encountered in divide|true_divide"
226
+ warnings.filterwarnings("ignore", msg, RuntimeWarning)
227
+ result = expr.evaluate(op, left, left, use_numexpr=True)
228
+ expected = expr.evaluate(op, left, left, use_numexpr=False)
229
+ tm.assert_numpy_array_equal(result, expected)
230
+
231
+ result = expr._can_use_numexpr(op, op_str, right, right, "evaluate")
232
+ assert not result
233
+
234
+ with option_context("compute.use_numexpr", False):
235
+ testit()
236
+
237
+ expr.set_numexpr_threads(1)
238
+ testit()
239
+ expr.set_numexpr_threads()
240
+ testit()
241
+
242
+ @pytest.mark.parametrize(
243
+ "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]
244
+ )
245
+ def test_comparison_ops(self, request, comparison_op, left_fix, right_fix):
246
+ left = request.getfixturevalue(left_fix)
247
+ right = request.getfixturevalue(right_fix)
248
+
249
+ def testit():
250
+ f12 = left + 1
251
+ f22 = right + 1
252
+
253
+ op = comparison_op
254
+
255
+ result = expr.evaluate(op, left, f12, use_numexpr=True)
256
+ expected = expr.evaluate(op, left, f12, use_numexpr=False)
257
+ tm.assert_numpy_array_equal(result, expected)
258
+
259
+ result = expr._can_use_numexpr(op, op, right, f22, "evaluate")
260
+ assert not result
261
+
262
+ with option_context("compute.use_numexpr", False):
263
+ testit()
264
+
265
+ expr.set_numexpr_threads(1)
266
+ testit()
267
+ expr.set_numexpr_threads()
268
+ testit()
269
+
270
+ @pytest.mark.parametrize("cond", [True, False])
271
+ @pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"])
272
+ def test_where(self, request, cond, fixture):
273
+ df = request.getfixturevalue(fixture)
274
+
275
+ def testit():
276
+ c = np.empty(df.shape, dtype=np.bool_)
277
+ c.fill(cond)
278
+ result = expr.where(c, df.values, df.values + 1)
279
+ expected = np.where(c, df.values, df.values + 1)
280
+ tm.assert_numpy_array_equal(result, expected)
281
+
282
+ with option_context("compute.use_numexpr", False):
283
+ testit()
284
+
285
+ expr.set_numexpr_threads(1)
286
+ testit()
287
+ expr.set_numexpr_threads()
288
+ testit()
289
+
290
+ @pytest.mark.parametrize(
291
+ "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")]
292
+ )
293
+ def test_bool_ops_raise_on_arithmetic(self, op_str, opname):
294
+ df = DataFrame({"a": np.random.rand(10) > 0.5, "b": np.random.rand(10) > 0.5})
295
+
296
+ msg = f"operator '{opname}' not implemented for bool dtypes"
297
+ f = getattr(operator, opname)
298
+ err_msg = re.escape(msg)
299
+
300
+ with pytest.raises(NotImplementedError, match=err_msg):
301
+ f(df, df)
302
+
303
+ with pytest.raises(NotImplementedError, match=err_msg):
304
+ f(df.a, df.b)
305
+
306
+ with pytest.raises(NotImplementedError, match=err_msg):
307
+ f(df.a, True)
308
+
309
+ with pytest.raises(NotImplementedError, match=err_msg):
310
+ f(False, df.a)
311
+
312
+ with pytest.raises(NotImplementedError, match=err_msg):
313
+ f(False, df)
314
+
315
+ with pytest.raises(NotImplementedError, match=err_msg):
316
+ f(df, True)
317
+
318
+ @pytest.mark.parametrize(
319
+ "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")]
320
+ )
321
+ def test_bool_ops_warn_on_arithmetic(self, op_str, opname):
322
+ n = 10
323
+ df = DataFrame({"a": np.random.rand(n) > 0.5, "b": np.random.rand(n) > 0.5})
324
+
325
+ subs = {"+": "|", "*": "&", "-": "^"}
326
+ sub_funcs = {"|": "or_", "&": "and_", "^": "xor"}
327
+
328
+ f = getattr(operator, opname)
329
+ fe = getattr(operator, sub_funcs[subs[op_str]])
330
+
331
+ if op_str == "-":
332
+ # raises TypeError
333
+ return
334
+
335
+ with tm.use_numexpr(True, min_elements=5):
336
+ with tm.assert_produces_warning():
337
+ r = f(df, df)
338
+ e = fe(df, df)
339
+ tm.assert_frame_equal(r, e)
340
+
341
+ with tm.assert_produces_warning():
342
+ r = f(df.a, df.b)
343
+ e = fe(df.a, df.b)
344
+ tm.assert_series_equal(r, e)
345
+
346
+ with tm.assert_produces_warning():
347
+ r = f(df.a, True)
348
+ e = fe(df.a, True)
349
+ tm.assert_series_equal(r, e)
350
+
351
+ with tm.assert_produces_warning():
352
+ r = f(False, df.a)
353
+ e = fe(False, df.a)
354
+ tm.assert_series_equal(r, e)
355
+
356
+ with tm.assert_produces_warning():
357
+ r = f(False, df)
358
+ e = fe(False, df)
359
+ tm.assert_frame_equal(r, e)
360
+
361
+ with tm.assert_produces_warning():
362
+ r = f(df, True)
363
+ e = fe(df, True)
364
+ tm.assert_frame_equal(r, e)
365
+
366
+ @pytest.mark.parametrize(
367
+ "test_input,expected",
368
+ [
369
+ (
370
+ DataFrame(
371
+ [[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"]
372
+ ),
373
+ DataFrame([[False, False], [False, False]], columns=["a", "dtype"]),
374
+ ),
375
+ (
376
+ DataFrame(
377
+ [[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]],
378
+ columns=["a", "b", "c", "dtype"],
379
+ ),
380
+ DataFrame(
381
+ [[False, False], [False, False], [False, False]],
382
+ columns=["a", "dtype"],
383
+ ),
384
+ ),
385
+ ],
386
+ )
387
+ def test_bool_ops_column_name_dtype(self, test_input, expected):
388
+ # GH 22383 - .ne fails if columns containing column name 'dtype'
389
+ result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]])
390
+ tm.assert_frame_equal(result, expected)
391
+
392
+ @pytest.mark.parametrize(
393
+ "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv")
394
+ )
395
+ @pytest.mark.parametrize("axis", (0, 1))
396
+ def test_frame_series_axis(self, axis, arith, _frame):
397
+ # GH#26736 Dataframe.floordiv(Series, axis=1) fails
398
+
399
+ df = _frame
400
+ if axis == 1:
401
+ other = df.iloc[0, :]
402
+ else:
403
+ other = df.iloc[:, 0]
404
+
405
+ expr._MIN_ELEMENTS = 0
406
+
407
+ op_func = getattr(df, arith)
408
+
409
+ with option_context("compute.use_numexpr", False):
410
+ expected = op_func(other, axis=axis)
411
+
412
+ result = op_func(other, axis=axis)
413
+ tm.assert_frame_equal(expected, result)
414
+
415
+ @pytest.mark.parametrize(
416
+ "op",
417
+ [
418
+ "__mod__",
419
+ "__rmod__",
420
+ "__floordiv__",
421
+ "__rfloordiv__",
422
+ ],
423
+ )
424
+ @pytest.mark.parametrize("box", [DataFrame, Series, Index])
425
+ @pytest.mark.parametrize("scalar", [-5, 5])
426
+ def test_python_semantics_with_numexpr_installed(self, op, box, scalar):
427
+ # https://github.com/pandas-dev/pandas/issues/36047
428
+ expr._MIN_ELEMENTS = 0
429
+ data = np.arange(-50, 50)
430
+ obj = box(data)
431
+ method = getattr(obj, op)
432
+ result = method(scalar)
433
+
434
+ # compare result with numpy
435
+ with option_context("compute.use_numexpr", False):
436
+ expected = method(scalar)
437
+
438
+ tm.assert_equal(result, expected)
439
+
440
+ # compare result element-wise with Python
441
+ for i, elem in enumerate(data):
442
+ if box == DataFrame:
443
+ scalar_result = result.iloc[i, 0]
444
+ else:
445
+ scalar_result = result[i]
446
+ try:
447
+ expected = getattr(int(elem), op)(scalar)
448
+ except ZeroDivisionError:
449
+ pass
450
+ else:
451
+ assert scalar_result == expected
videochat2/lib/python3.10/site-packages/pandas/tests/test_flags.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import pandas as pd
4
+
5
+
6
+ class TestFlags:
7
+ def test_equality(self):
8
+ a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags
9
+ b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags
10
+
11
+ assert a == a
12
+ assert b == b
13
+ assert a != b
14
+ assert a != 2
15
+
16
+ def test_set(self):
17
+ df = pd.DataFrame().set_flags(allows_duplicate_labels=True)
18
+ a = df.flags
19
+ a.allows_duplicate_labels = False
20
+ assert a.allows_duplicate_labels is False
21
+ a["allows_duplicate_labels"] = True
22
+ assert a.allows_duplicate_labels is True
23
+
24
+ def test_repr(self):
25
+ a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags)
26
+ assert a == "<Flags(allows_duplicate_labels=True)>"
27
+ a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags)
28
+ assert a == "<Flags(allows_duplicate_labels=False)>"
29
+
30
+ def test_obj_ref(self):
31
+ df = pd.DataFrame()
32
+ flags = df.flags
33
+ del df
34
+ with pytest.raises(ValueError, match="object has been deleted"):
35
+ flags.allows_duplicate_labels = True
36
+
37
+ def test_getitem(self):
38
+ df = pd.DataFrame()
39
+ flags = df.flags
40
+ assert flags["allows_duplicate_labels"] is True
41
+ flags["allows_duplicate_labels"] = False
42
+ assert flags["allows_duplicate_labels"] is False
43
+
44
+ with pytest.raises(KeyError, match="a"):
45
+ flags["a"]
46
+
47
+ with pytest.raises(ValueError, match="a"):
48
+ flags["a"] = 10
videochat2/lib/python3.10/site-packages/pandas/tests/test_multilevel.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ MultiIndex,
8
+ Series,
9
+ )
10
+ import pandas._testing as tm
11
+
12
+
13
+ class TestMultiLevel:
14
+ def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data):
15
+ # axis=0
16
+ ymd = multiindex_year_month_day_dataframe_random_data
17
+
18
+ month_sums = ymd.groupby("month").sum()
19
+ result = month_sums.reindex(ymd.index, level=1)
20
+ expected = ymd.groupby(level="month").transform(np.sum)
21
+
22
+ tm.assert_frame_equal(result, expected)
23
+
24
+ # Series
25
+ result = month_sums["A"].reindex(ymd.index, level=1)
26
+ expected = ymd["A"].groupby(level="month").transform(np.sum)
27
+ tm.assert_series_equal(result, expected, check_names=False)
28
+
29
+ # axis=1
30
+ month_sums = ymd.T.groupby("month", axis=1).sum()
31
+ result = month_sums.reindex(columns=ymd.index, level=1)
32
+ expected = ymd.groupby(level="month").transform(np.sum).T
33
+ tm.assert_frame_equal(result, expected)
34
+
35
+ def test_reindex(self, multiindex_dataframe_random_data):
36
+ frame = multiindex_dataframe_random_data
37
+
38
+ expected = frame.iloc[[0, 3]]
39
+ reindexed = frame.loc[[("foo", "one"), ("bar", "one")]]
40
+ tm.assert_frame_equal(reindexed, expected)
41
+
42
+ def test_reindex_preserve_levels(
43
+ self, multiindex_year_month_day_dataframe_random_data
44
+ ):
45
+ ymd = multiindex_year_month_day_dataframe_random_data
46
+
47
+ new_index = ymd.index[::10]
48
+ chunk = ymd.reindex(new_index)
49
+ assert chunk.index is new_index
50
+
51
+ chunk = ymd.loc[new_index]
52
+ assert chunk.index.equals(new_index)
53
+
54
+ ymdT = ymd.T
55
+ chunk = ymdT.reindex(columns=new_index)
56
+ assert chunk.columns is new_index
57
+
58
+ chunk = ymdT.loc[:, new_index]
59
+ assert chunk.columns.equals(new_index)
60
+
61
+ def test_groupby_transform(self, multiindex_dataframe_random_data):
62
+ frame = multiindex_dataframe_random_data
63
+
64
+ s = frame["A"]
65
+ grouper = s.index.get_level_values(0)
66
+
67
+ grouped = s.groupby(grouper, group_keys=False)
68
+
69
+ applied = grouped.apply(lambda x: x * 2)
70
+ expected = grouped.transform(lambda x: x * 2)
71
+ result = applied.reindex(expected.index)
72
+ tm.assert_series_equal(result, expected, check_names=False)
73
+
74
+ def test_groupby_corner(self):
75
+ midx = MultiIndex(
76
+ levels=[["foo"], ["bar"], ["baz"]],
77
+ codes=[[0], [0], [0]],
78
+ names=["one", "two", "three"],
79
+ )
80
+ df = DataFrame([np.random.rand(4)], columns=["a", "b", "c", "d"], index=midx)
81
+ # should work
82
+ df.groupby(level="three")
83
+
84
+ def test_groupby_level_no_obs(self):
85
+ # #1697
86
+ midx = MultiIndex.from_tuples(
87
+ [
88
+ ("f1", "s1"),
89
+ ("f1", "s2"),
90
+ ("f2", "s1"),
91
+ ("f2", "s2"),
92
+ ("f3", "s1"),
93
+ ("f3", "s2"),
94
+ ]
95
+ )
96
+ df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
97
+ df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])]
98
+
99
+ grouped = df1.groupby(axis=1, level=0)
100
+ result = grouped.sum()
101
+ assert (result.columns == ["f2", "f3"]).all()
102
+
103
+ def test_setitem_with_expansion_multiindex_columns(
104
+ self, multiindex_year_month_day_dataframe_random_data
105
+ ):
106
+ ymd = multiindex_year_month_day_dataframe_random_data
107
+
108
+ df = ymd[:5].T
109
+ df[2000, 1, 10] = df[2000, 1, 7]
110
+ assert isinstance(df.columns, MultiIndex)
111
+ assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
112
+
113
+ def test_alignment(self):
114
+ x = Series(
115
+ data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)])
116
+ )
117
+
118
+ y = Series(
119
+ data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)])
120
+ )
121
+
122
+ res = x - y
123
+ exp_index = x.index.union(y.index)
124
+ exp = x.reindex(exp_index) - y.reindex(exp_index)
125
+ tm.assert_series_equal(res, exp)
126
+
127
+ # hit non-monotonic code path
128
+ res = x[::-1] - y[::-1]
129
+ exp_index = x.index.union(y.index)
130
+ exp = x.reindex(exp_index) - y.reindex(exp_index)
131
+ tm.assert_series_equal(res, exp)
132
+
133
+ def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data):
134
+ ymd = multiindex_year_month_day_dataframe_random_data
135
+
136
+ result = ymd.groupby(level=[0, 1]).mean()
137
+
138
+ k1 = ymd.index.get_level_values(0)
139
+ k2 = ymd.index.get_level_values(1)
140
+
141
+ expected = ymd.groupby([k1, k2]).mean()
142
+
143
+ # TODO groupby with level_values drops names
144
+ tm.assert_frame_equal(result, expected, check_names=False)
145
+ assert result.index.names == ymd.index.names[:2]
146
+
147
+ result2 = ymd.groupby(level=ymd.index.names[:2]).mean()
148
+ tm.assert_frame_equal(result, result2)
149
+
150
+ def test_multilevel_consolidate(self):
151
+ index = MultiIndex.from_tuples(
152
+ [("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")]
153
+ )
154
+ df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
155
+ df["Totals", ""] = df.sum(1)
156
+ df = df._consolidate()
157
+
158
+ def test_level_with_tuples(self):
159
+ index = MultiIndex(
160
+ levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
161
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
162
+ )
163
+
164
+ series = Series(np.random.randn(6), index=index)
165
+ frame = DataFrame(np.random.randn(6, 4), index=index)
166
+
167
+ result = series[("foo", "bar", 0)]
168
+ result2 = series.loc[("foo", "bar", 0)]
169
+ expected = series[:2]
170
+ expected.index = expected.index.droplevel(0)
171
+ tm.assert_series_equal(result, expected)
172
+ tm.assert_series_equal(result2, expected)
173
+
174
+ with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"):
175
+ series[("foo", "bar", 0), 2]
176
+
177
+ result = frame.loc[("foo", "bar", 0)]
178
+ result2 = frame.xs(("foo", "bar", 0))
179
+ expected = frame[:2]
180
+ expected.index = expected.index.droplevel(0)
181
+ tm.assert_frame_equal(result, expected)
182
+ tm.assert_frame_equal(result2, expected)
183
+
184
+ index = MultiIndex(
185
+ levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]],
186
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
187
+ )
188
+
189
+ series = Series(np.random.randn(6), index=index)
190
+ frame = DataFrame(np.random.randn(6, 4), index=index)
191
+
192
+ result = series[("foo", "bar")]
193
+ result2 = series.loc[("foo", "bar")]
194
+ expected = series[:2]
195
+ expected.index = expected.index.droplevel(0)
196
+ tm.assert_series_equal(result, expected)
197
+ tm.assert_series_equal(result2, expected)
198
+
199
+ result = frame.loc[("foo", "bar")]
200
+ result2 = frame.xs(("foo", "bar"))
201
+ expected = frame[:2]
202
+ expected.index = expected.index.droplevel(0)
203
+ tm.assert_frame_equal(result, expected)
204
+ tm.assert_frame_equal(result2, expected)
205
+
206
+ def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data):
207
+ frame = multiindex_dataframe_random_data
208
+
209
+ result = frame.reindex(["foo", "qux"], level=0)
210
+ expected = frame.iloc[[0, 1, 2, 7, 8, 9]]
211
+ tm.assert_frame_equal(result, expected)
212
+
213
+ result = frame.T.reindex(["foo", "qux"], axis=1, level=0)
214
+ tm.assert_frame_equal(result, expected.T)
215
+
216
+ result = frame.loc[["foo", "qux"]]
217
+ tm.assert_frame_equal(result, expected)
218
+
219
+ result = frame["A"].loc[["foo", "qux"]]
220
+ tm.assert_series_equal(result, expected["A"])
221
+
222
+ result = frame.T.loc[:, ["foo", "qux"]]
223
+ tm.assert_frame_equal(result, expected.T)
224
+
225
+ @pytest.mark.parametrize("d", [4, "d"])
226
+ def test_empty_frame_groupby_dtypes_consistency(self, d):
227
+ # GH 20888
228
+ group_keys = ["a", "b", "c"]
229
+ df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]})
230
+
231
+ g = df[df.a == 2].groupby(group_keys)
232
+ result = g.first().index
233
+ expected = MultiIndex(
234
+ levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"]
235
+ )
236
+
237
+ tm.assert_index_equal(result, expected)
238
+
239
+ def test_duplicate_groupby_issues(self):
240
+ idx_tp = [
241
+ ("600809", "20061231"),
242
+ ("600809", "20070331"),
243
+ ("600809", "20070630"),
244
+ ("600809", "20070331"),
245
+ ]
246
+ dt = ["demo", "demo", "demo", "demo"]
247
+
248
+ idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"])
249
+ s = Series(dt, index=idx)
250
+
251
+ result = s.groupby(s.index).first()
252
+ assert len(result) == 3
253
+
254
+ def test_subsets_multiindex_dtype(self):
255
+ # GH 20757
256
+ data = [["x", 1]]
257
+ columns = [("a", "b", np.nan), ("a", "c", 0.0)]
258
+ df = DataFrame(data, columns=MultiIndex.from_tuples(columns))
259
+ expected = df.dtypes.a.b
260
+ result = df.a.b.dtypes
261
+ tm.assert_series_equal(result, expected)
262
+
263
+
264
+ class TestSorted:
265
+ """everything you wanted to test about sorting"""
266
+
267
+ def test_sort_non_lexsorted(self):
268
+ # degenerate case where we sort but don't
269
+ # have a satisfying result :<
270
+ # GH 15797
271
+ idx = MultiIndex(
272
+ [["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]]
273
+ )
274
+
275
+ df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64")
276
+ assert df.index.is_monotonic_increasing is False
277
+
278
+ sorted = df.sort_index()
279
+ assert sorted.index.is_monotonic_increasing is True
280
+
281
+ expected = DataFrame(
282
+ {"col": [1, 4, 5, 2]},
283
+ index=MultiIndex.from_tuples(
284
+ [("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")]
285
+ ),
286
+ dtype="int64",
287
+ )
288
+ result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :]
289
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import types
3
+
4
+ import pytest
5
+
6
+ from pandas.compat._optional import (
7
+ VERSIONS,
8
+ import_optional_dependency,
9
+ )
10
+
11
+ import pandas._testing as tm
12
+
13
+
14
def test_import_optional():
    """A missing optional dependency raises a helpful, chained ImportError."""
    msg = "Missing .*notapackage.* pip .* conda .* notapackage"
    with pytest.raises(ImportError, match=msg) as exc_info:
        import_optional_dependency("notapackage")
    # The original ImportError must be preserved as the chained context.
    assert isinstance(exc_info.value.__context__, ImportError)

    # errors="ignore" returns None instead of raising.
    assert import_optional_dependency("notapackage", errors="ignore") is None
23
+
24
+
25
def test_xlrd_version_fallback():
    # xlrd is only an optional dependency: skip rather than fail when absent.
    pytest.importorskip("xlrd")
    # Importing through the optional-dependency machinery must not raise
    # even though xlrd's version is reported through a fallback path.
    import_optional_dependency("xlrd")
28
+
29
+
30
def test_bad_version(monkeypatch):
    """Too-old optional dependencies raise; min_version/errors= override."""
    name = "fakemodule"
    module = types.ModuleType(name)
    module.__version__ = "0.9.0"
    # Register via monkeypatch so sys.modules is restored after the test
    # instead of leaking the fake module into the rest of the test run.
    monkeypatch.setitem(sys.modules, name, module)
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
    with pytest.raises(ImportError, match=match):
        import_optional_dependency("fakemodule")

    # An explicit, lower min_version takes precedence over VERSIONS.
    result = import_optional_dependency("fakemodule", min_version="0.8")
    assert result is module

    # errors="warn" downgrades the failure to a UserWarning and returns None.
    with tm.assert_produces_warning(UserWarning):
        result = import_optional_dependency("fakemodule", errors="warn")
    assert result is None

    module.__version__ = "1.0.0"  # exact match is OK
    result = import_optional_dependency("fakemodule")
    assert result is module
52
+
53
+
54
def test_submodule(monkeypatch):
    """Version checks for "pkg.sub" are performed against the parent package."""
    # Create a fake module with a submodule attached.
    name = "fakemodule"
    module = types.ModuleType(name)
    module.__version__ = "0.9.0"
    sub_name = "submodule"
    submodule = types.ModuleType(sub_name)
    setattr(module, sub_name, submodule)
    # Register both entries via monkeypatch so sys.modules is cleaned up
    # afterwards instead of leaking fake modules into other tests.
    monkeypatch.setitem(sys.modules, name, module)
    monkeypatch.setitem(sys.modules, f"{name}.{sub_name}", submodule)
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
    with pytest.raises(ImportError, match=match):
        import_optional_dependency("fakemodule.submodule")

    # errors="warn" downgrades the version failure to a UserWarning.
    with tm.assert_produces_warning(UserWarning):
        result = import_optional_dependency("fakemodule.submodule", errors="warn")
    assert result is None

    module.__version__ = "1.0.0"  # exact match is OK
    # On success the submodule itself, not the parent, is returned.
    result = import_optional_dependency("fakemodule.submodule")
    assert result is submodule
77
+
78
+
79
def test_no_version_raises(monkeypatch):
    """A module exposing no __version__ at all raises ImportError."""
    name = "fakemodule"
    module = types.ModuleType(name)
    # Register via monkeypatch so the fake module is removed from
    # sys.modules when the test finishes (the original leaked it).
    monkeypatch.setitem(sys.modules, name, module)
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    with pytest.raises(ImportError, match="Can't determine .* fakemodule"):
        import_optional_dependency(name)
videochat2/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ from typing import Generator
3
+
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+ from pandas.core import accessor
9
+
10
+
11
def test_dirname_mixin():
    # GH37173: dir() on a DirNamesMixin subclass lists class attributes and
    # instance attributes, but not bare (unassigned) annotations.

    class Dummy(accessor.DirNamesMixin):
        x = 1
        y: int  # annotation only -- must not appear in dir()

        def __init__(self) -> None:
            self.z = 3

    public_names = [
        name for name in dir(Dummy()) if not name.startswith("_")
    ]

    assert public_names == ["x", "z"]
24
+
25
+
26
@contextlib.contextmanager
def ensure_removed(obj, attr) -> Generator[None, None, None]:
    """Guarantee that ``attr`` added to ``obj`` during the block is removed,
    along with its entry in ``obj._accessors``, once the block exits.
    """
    try:
        yield
    finally:
        # The attribute may never have been set; that's fine.
        with contextlib.suppress(AttributeError):
            delattr(obj, attr)
        obj._accessors.discard(attr)
39
+
40
+
41
class MyAccessor:
    """Minimal accessor used by the registration tests.

    Records the object it is attached to and exposes the same stored value
    through both a property and a method.
    """

    def __init__(self, obj) -> None:
        self.obj = obj
        self.item = "item"

    @property
    def prop(self):
        # Property access path.
        return self.item

    def method(self):
        # Method access path.
        return self.item
52
+
53
+
54
@pytest.mark.parametrize(
    "obj, registrar",
    [
        (pd.Series, pd.api.extensions.register_series_accessor),
        (pd.DataFrame, pd.api.extensions.register_dataframe_accessor),
        (pd.Index, pd.api.extensions.register_index_accessor),
    ],
)
def test_register(obj, registrar):
    """Registering an accessor exposes exactly one new public name."""
    with ensure_removed(obj, "mine"):
        dir_before = set(dir(obj))
        registrar("mine")(MyAccessor)
        # An empty object Series needs an explicit dtype to avoid a warning.
        if obj is pd.Series:
            instance = obj([], dtype=object)
        else:
            instance = obj([])
        assert instance.mine.prop == "item"
        dir_after = set(dir(obj))
        # Only "mine" was added; nothing else changed.
        assert (dir_before ^ dir_after) == {"mine"}
        assert "mine" in obj._accessors
71
+
72
+
73
def test_accessor_works():
    """A registered accessor is bound to the Series and returns its data."""
    with ensure_removed(pd.Series, "mine"):
        pd.api.extensions.register_series_accessor("mine")(MyAccessor)

        ser = pd.Series([1, 2])
        # The accessor receives the Series it hangs off of...
        assert ser.mine.obj is ser
        # ...and both access paths return the stored item.
        assert ser.mine.prop == "item"
        assert ser.mine.method() == "item"
82
+
83
+
84
def test_overwrite_warns():
    """Registering over an existing attribute warns but still installs."""
    # Need to restore mean afterwards.
    original_mean = pd.Series.mean
    try:
        with tm.assert_produces_warning(UserWarning) as w:
            pd.api.extensions.register_series_accessor("mean")(MyAccessor)
            assert pd.Series([1, 2]).mean.prop == "item"
        warning_text = str(w[0].message)
        # The warning must name what was overridden, by what, and where.
        for fragment in ("mean", "MyAccessor", "Series"):
            assert fragment in warning_text
    finally:
        # Put the real Series.mean back for the rest of the suite.
        pd.Series.mean = original_mean
98
+
99
+
100
def test_raises_attribute_error():
    """An AttributeError raised inside the accessor's __init__ propagates
    instead of being swallowed by the descriptor machinery."""
    with ensure_removed(pd.Series, "bad"):

        @pd.api.extensions.register_series_accessor("bad")
        class Bad:
            def __init__(self, data) -> None:
                raise AttributeError("whoops")

        with pytest.raises(AttributeError, match="whoops"):
            pd.Series([], dtype=object).bad
videochat2/lib/python3.10/site-packages/pandas/tests/test_take.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import re
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas._libs import iNaT
8
+
9
+ import pandas._testing as tm
10
+ import pandas.core.algorithms as algos
11
+
12
+
13
# Each param is a (array dtype, fill value, expected result dtype) triple for
# the TestTake fill tests: filling with a value the array dtype cannot hold
# forces promotion (e.g. int32 + 2.0 -> float64, anything + "" -> object).
@pytest.fixture(
    params=[
        (np.int8, np.int16(127), np.int8),
        (np.int8, np.int16(128), np.int16),
        (np.int32, 1, np.int32),
        (np.int32, 2.0, np.float64),
        (np.int32, 3.0 + 4.0j, np.complex128),
        (np.int32, True, np.object_),
        (np.int32, "", np.object_),
        (np.float64, 1, np.float64),
        (np.float64, 2.0, np.float64),
        (np.float64, 3.0 + 4.0j, np.complex128),
        (np.float64, True, np.object_),
        (np.float64, "", np.object_),
        (np.complex128, 1, np.complex128),
        (np.complex128, 2.0, np.complex128),
        (np.complex128, 3.0 + 4.0j, np.complex128),
        (np.complex128, True, np.object_),
        (np.complex128, "", np.object_),
        (np.bool_, 1, np.object_),
        (np.bool_, 2.0, np.object_),
        (np.bool_, 3.0 + 4.0j, np.object_),
        (np.bool_, True, np.bool_),
        (np.bool_, "", np.object_),
    ]
)
def dtype_fill_out_dtype(request):
    """Yield one (dtype, fill_value, out_dtype) parameter triple."""
    return request.param
41
+
42
+
43
class TestTake:
    """Tests for ``algos.take_nd``/``algos.take`` on 1d/2d/3d ndarrays:
    fill-value dtype promotion, per-axis handling, and bounds validation."""

    # Standard incompatible fill error.
    fill_error = re.compile("Incompatible type for fill_value")

    def test_1d_fill_nonna(self, dtype_fill_out_dtype):
        # Filling with a non-NA scalar promotes the result dtype as needed.
        dtype, fill_value, out_dtype = dtype_fill_out_dtype
        data = np.random.randint(0, 2, 4).astype(dtype)
        indexer = [2, 1, 0, -1]

        result = algos.take_nd(data, indexer, fill_value=fill_value)
        assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all()
        assert result[3] == fill_value
        assert result.dtype == out_dtype

        # No -1 in the indexer -> nothing to fill -> dtype is preserved.
        indexer = [2, 1, 0, 1]

        result = algos.take_nd(data, indexer, fill_value=fill_value)
        assert (result[[0, 1, 2, 3]] == data[indexer]).all()
        assert result.dtype == dtype

    def test_2d_fill_nonna(self, dtype_fill_out_dtype):
        # Same as the 1d case, but exercised along both axes of a 2d array.
        dtype, fill_value, out_dtype = dtype_fill_out_dtype
        data = np.random.randint(0, 2, (5, 3)).astype(dtype)
        indexer = [2, 1, 0, -1]

        result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
        assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()
        assert (result[3, :] == fill_value).all()
        assert result.dtype == out_dtype

        result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
        assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()
        assert (result[:, 3] == fill_value).all()
        assert result.dtype == out_dtype

        # Without a -1 the original dtype is kept on either axis.
        indexer = [2, 1, 0, 1]
        result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
        assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all()
        assert result.dtype == dtype

        result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
        assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all()
        assert result.dtype == dtype

    def test_3d_fill_nonna(self, dtype_fill_out_dtype):
        # Same again for all three axes of a 3d array.
        dtype, fill_value, out_dtype = dtype_fill_out_dtype

        data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
        indexer = [2, 1, 0, -1]

        result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
        assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()
        assert (result[3, :, :] == fill_value).all()
        assert result.dtype == out_dtype

        result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
        assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()
        assert (result[:, 3, :] == fill_value).all()
        assert result.dtype == out_dtype

        result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
        assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()
        assert (result[:, :, 3] == fill_value).all()
        assert result.dtype == out_dtype

        # Without a -1 the original dtype is kept on every axis.
        indexer = [2, 1, 0, 1]
        result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
        assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()
        assert result.dtype == dtype

        result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
        assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()
        assert result.dtype == dtype

        result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
        assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all()
        assert result.dtype == dtype

    def test_1d_other_dtypes(self):
        # float32 input: the default fill for a -1 indexer is NaN.
        arr = np.random.randn(10).astype(np.float32)

        indexer = [1, 2, 3, -1]
        result = algos.take_nd(arr, indexer)
        expected = arr.take(indexer)
        expected[-1] = np.nan
        tm.assert_almost_equal(result, expected)

    def test_2d_other_dtypes(self):
        # float32 input in 2d, NaN-filled along each axis.
        arr = np.random.randn(10, 5).astype(np.float32)

        indexer = [1, 2, 3, -1]

        # axis=0
        result = algos.take_nd(arr, indexer, axis=0)
        expected = arr.take(indexer, axis=0)
        expected[-1] = np.nan
        tm.assert_almost_equal(result, expected)

        # axis=1
        result = algos.take_nd(arr, indexer, axis=1)
        expected = arr.take(indexer, axis=1)
        expected[:, -1] = np.nan
        tm.assert_almost_equal(result, expected)

    def test_1d_bool(self):
        arr = np.array([0, 1, 0], dtype=bool)

        result = algos.take_nd(arr, [0, 2, 2, 1])
        expected = arr.take([0, 2, 2, 1])
        tm.assert_numpy_array_equal(result, expected)

        # A -1 indexer forces NA-filling, so bool promotes to object.
        result = algos.take_nd(arr, [0, 2, -1])
        assert result.dtype == np.object_

    def test_2d_bool(self):
        arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)

        result = algos.take_nd(arr, [0, 2, 2, 1])
        expected = arr.take([0, 2, 2, 1], axis=0)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
        expected = arr.take([0, 2, 2, 1], axis=1)
        tm.assert_numpy_array_equal(result, expected)

        # NA-filling promotes bool to object, as in the 1d case.
        result = algos.take_nd(arr, [0, 2, -1])
        assert result.dtype == np.object_

    def test_2d_float32(self):
        # Multiple -1 entries fill multiple rows/columns with NaN.
        arr = np.random.randn(4, 3).astype(np.float32)
        indexer = [0, 2, -1, 1, -1]

        # axis=0
        result = algos.take_nd(arr, indexer, axis=0)

        expected = arr.take(indexer, axis=0)
        expected[[2, 4], :] = np.nan
        tm.assert_almost_equal(result, expected)

        # axis=1
        result = algos.take_nd(arr, indexer, axis=1)
        expected = arr.take(indexer, axis=1)
        expected[:, [2, 4]] = np.nan
        tm.assert_almost_equal(result, expected)

    def test_2d_datetime64(self):
        # datetime64 gets iNaT as its missing marker by default, or an
        # explicit datetime fill_value when one is supplied.
        # 2005/01/01 - 2006/01/01
        arr = np.random.randint(11_045_376, 11_360_736, (5, 3)) * 100_000_000_000
        arr = arr.view(dtype="datetime64[ns]")
        indexer = [0, 2, -1, 1, -1]

        # axis=0
        result = algos.take_nd(arr, indexer, axis=0)
        expected = arr.take(indexer, axis=0)
        expected.view(np.int64)[[2, 4], :] = iNaT
        tm.assert_almost_equal(result, expected)

        result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1))
        expected = arr.take(indexer, axis=0)
        expected[[2, 4], :] = datetime(2007, 1, 1)
        tm.assert_almost_equal(result, expected)

        # axis=1
        result = algos.take_nd(arr, indexer, axis=1)
        expected = arr.take(indexer, axis=1)
        expected.view(np.int64)[:, [2, 4]] = iNaT
        tm.assert_almost_equal(result, expected)

        result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1))
        expected = arr.take(indexer, axis=1)
        expected[:, [2, 4]] = datetime(2007, 1, 1)
        tm.assert_almost_equal(result, expected)

    def test_take_axis_0(self):
        arr = np.arange(12).reshape(4, 3)
        # Plain take: -1 means "last row", numpy-style.
        result = algos.take(arr, [0, -1])
        expected = np.array([[0, 1, 2], [9, 10, 11]])
        tm.assert_numpy_array_equal(result, expected)

        # allow_fill=True: -1 means "missing", filled with fill_value.
        result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0)
        expected = np.array([[0, 1, 2], [0, 0, 0]])
        tm.assert_numpy_array_equal(result, expected)

    def test_take_axis_1(self):
        arr = np.arange(12).reshape(4, 3)
        result = algos.take(arr, [0, -1], axis=1)
        expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]])
        tm.assert_numpy_array_equal(result, expected)

        # allow_fill=True
        result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0)
        expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]])
        tm.assert_numpy_array_equal(result, expected)

        # GH#26976 make sure we validate along the correct axis
        with pytest.raises(IndexError, match="indices are out-of-bounds"):
            algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0)

    def test_take_non_hashable_fill_value(self):
        # Non-scalar fill values are rejected for non-object dtypes...
        arr = np.array([1, 2, 3])
        indexer = np.array([1, -1])
        with pytest.raises(ValueError, match="fill_value must be a scalar"):
            algos.take(arr, indexer, allow_fill=True, fill_value=[1])

        # with object dtype it is allowed
        arr = np.array([1, 2, 3], dtype=object)
        result = algos.take(arr, indexer, allow_fill=True, fill_value=[1])
        expected = np.array([2, [1]], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
253
+
254
+
255
+ class TestExtensionTake:
256
+ # The take method found in pd.api.extensions
257
+
258
+ def test_bounds_check_large(self):
259
+ arr = np.array([1, 2])
260
+
261
+ msg = "indices are out-of-bounds"
262
+ with pytest.raises(IndexError, match=msg):
263
+ algos.take(arr, [2, 3], allow_fill=True)
264
+
265
+ msg = "index 2 is out of bounds for( axis 0 with)? size 2"
266
+ with pytest.raises(IndexError, match=msg):
267
+ algos.take(arr, [2, 3], allow_fill=False)
268
+
269
+ def test_bounds_check_small(self):
270
+ arr = np.array([1, 2, 3], dtype=np.int64)
271
+ indexer = [0, -1, -2]
272
+
273
+ msg = r"'indices' contains values less than allowed \(-2 < -1\)"
274
+ with pytest.raises(ValueError, match=msg):
275
+ algos.take(arr, indexer, allow_fill=True)
276
+
277
+ result = algos.take(arr, indexer)
278
+ expected = np.array([1, 3, 2], dtype=np.int64)
279
+ tm.assert_numpy_array_equal(result, expected)
280
+
281
+ @pytest.mark.parametrize("allow_fill", [True, False])
282
+ def test_take_empty(self, allow_fill):
283
+ arr = np.array([], dtype=np.int64)
284
+ # empty take is ok
285
+ result = algos.take(arr, [], allow_fill=allow_fill)
286
+ tm.assert_numpy_array_equal(arr, result)
287
+
288
+ msg = "|".join(
289
+ [
290
+ "cannot do a non-empty take from an empty axes.",
291
+ "indices are out-of-bounds",
292
+ ]
293
+ )
294
+ with pytest.raises(IndexError, match=msg):
295
+ algos.take(arr, [0], allow_fill=allow_fill)
296
+
297
+ def test_take_na_empty(self):
298
+ result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0)
299
+ expected = np.array([0.0, 0.0])
300
+ tm.assert_numpy_array_equal(result, expected)
301
+
302
+ def test_take_coerces_list(self):
303
+ arr = [1, 2, 3]
304
+ result = algos.take(arr, [0, 0])
305
+ expected = np.array([1, 1])
306
+ tm.assert_numpy_array_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_api.cpython-310.pyc ADDED
Binary file (1.38 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_array_to_datetime.cpython-310.pyc ADDED
Binary file (5.03 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_ccalendar.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_conversion.cpython-310.pyc ADDED
Binary file (4.68 kB). View file