ZTWHHH commited on
Commit
fa7e5ae
·
verified ·
1 Parent(s): 92ee5ac

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/masked_shared.cpython-310.pyc +0 -0
  3. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_array.cpython-310.pyc +0 -0
  4. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_datetimelike.cpython-310.pyc +0 -0
  5. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_datetimes.cpython-310.pyc +0 -0
  6. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_ndarray_backed.cpython-310.pyc +0 -0
  7. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_period.cpython-310.pyc +0 -0
  8. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_timedeltas.cpython-310.pyc +0 -0
  9. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__init__.py +0 -0
  10. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_constructors.py +284 -0
  11. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_reductions.py +183 -0
  12. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_arithmetic.cpython-310.pyc +0 -0
  14. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_arrow_compat.cpython-310.pyc +0 -0
  15. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_function.cpython-310.pyc +0 -0
  16. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_indexing.cpython-310.pyc +0 -0
  17. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/test_arithmetic.py +248 -0
  18. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__init__.py +0 -0
  19. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/__init__.cpython-310.pyc +0 -0
  20. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_indexing.cpython-310.pyc +0 -0
  21. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_numpy.cpython-310.pyc +0 -0
  22. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_indexing.py +41 -0
  23. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_numpy.py +324 -0
  24. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__init__.py +0 -0
  25. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_arrow_compat.cpython-310.pyc +0 -0
  27. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_astype.cpython-310.pyc +0 -0
  28. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_constructors.cpython-310.pyc +0 -0
  29. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_reductions.cpython-310.pyc +0 -0
  30. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_arrow_compat.py +130 -0
  31. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_astype.py +67 -0
  32. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_constructors.py +156 -0
  33. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_reductions.py +42 -0
  34. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__init__.py +0 -0
  35. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_combine_concat.cpython-310.pyc +0 -0
  37. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_libsparse.cpython-310.pyc +0 -0
  38. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_reductions.cpython-310.pyc +0 -0
  39. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py +253 -0
  40. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py +514 -0
  41. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py +480 -0
  42. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py +133 -0
  43. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py +62 -0
  44. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py +285 -0
  45. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py +224 -0
  46. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py +302 -0
  47. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py +551 -0
  48. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py +306 -0
  49. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py +79 -0
  50. llava_next/lib/python3.10/site-packages/pandas/tests/arrays/string_/__init__.py +0 -0
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/masked_shared.cpython-310.pyc ADDED
Binary file (4.32 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_array.cpython-310.pyc ADDED
Binary file (9.31 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_datetimelike.cpython-310.pyc ADDED
Binary file (35.5 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_datetimes.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_ndarray_backed.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_period.cpython-310.pyc ADDED
Binary file (5.7 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/__pycache__/test_timedeltas.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_constructors.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import iNaT
5
+
6
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
7
+
8
+ import pandas as pd
9
+ import pandas._testing as tm
10
+ from pandas.core.arrays import DatetimeArray
11
+
12
+
13
+ class TestDatetimeArrayConstructor:
14
+ def test_from_sequence_invalid_type(self):
15
+ mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)])
16
+ with pytest.raises(TypeError, match="Cannot create a DatetimeArray"):
17
+ DatetimeArray._from_sequence(mi, dtype="M8[ns]")
18
+
19
+ def test_only_1dim_accepted(self):
20
+ arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
21
+
22
+ depr_msg = "DatetimeArray.__init__ is deprecated"
23
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
24
+ with pytest.raises(ValueError, match="Only 1-dimensional"):
25
+ # 3-dim, we allow 2D to sneak in for ops purposes GH#29853
26
+ DatetimeArray(arr.reshape(2, 2, 1))
27
+
28
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
29
+ with pytest.raises(ValueError, match="Only 1-dimensional"):
30
+ # 0-dim
31
+ DatetimeArray(arr[[0]].squeeze())
32
+
33
+ def test_freq_validation(self):
34
+ # GH#24623 check that invalid instances cannot be created with the
35
+ # public constructor
36
+ arr = np.arange(5, dtype=np.int64) * 3600 * 10**9
37
+
38
+ msg = (
39
+ "Inferred frequency h from passed values does not "
40
+ "conform to passed frequency W-SUN"
41
+ )
42
+ depr_msg = "DatetimeArray.__init__ is deprecated"
43
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
44
+ with pytest.raises(ValueError, match=msg):
45
+ DatetimeArray(arr, freq="W")
46
+
47
+ @pytest.mark.parametrize(
48
+ "meth",
49
+ [
50
+ DatetimeArray._from_sequence,
51
+ pd.to_datetime,
52
+ pd.DatetimeIndex,
53
+ ],
54
+ )
55
+ def test_mixing_naive_tzaware_raises(self, meth):
56
+ # GH#24569
57
+ arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")])
58
+
59
+ msg = (
60
+ "Cannot mix tz-aware with tz-naive values|"
61
+ "Tz-aware datetime.datetime cannot be converted "
62
+ "to datetime64 unless utc=True"
63
+ )
64
+
65
+ for obj in [arr, arr[::-1]]:
66
+ # check that we raise regardless of whether naive is found
67
+ # before aware or vice-versa
68
+ with pytest.raises(ValueError, match=msg):
69
+ meth(obj)
70
+
71
+ def test_from_pandas_array(self):
72
+ arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9
73
+
74
+ result = DatetimeArray._from_sequence(arr, dtype="M8[ns]")._with_freq("infer")
75
+
76
+ expected = pd.date_range("1970-01-01", periods=5, freq="h")._data
77
+ tm.assert_datetime_array_equal(result, expected)
78
+
79
+ def test_mismatched_timezone_raises(self):
80
+ depr_msg = "DatetimeArray.__init__ is deprecated"
81
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
82
+ arr = DatetimeArray(
83
+ np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"),
84
+ dtype=DatetimeTZDtype(tz="US/Central"),
85
+ )
86
+ dtype = DatetimeTZDtype(tz="US/Eastern")
87
+ msg = r"dtype=datetime64\[ns.*\] does not match data dtype datetime64\[ns.*\]"
88
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
89
+ with pytest.raises(TypeError, match=msg):
90
+ DatetimeArray(arr, dtype=dtype)
91
+
92
+ # also with mismatched tzawareness
93
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
94
+ with pytest.raises(TypeError, match=msg):
95
+ DatetimeArray(arr, dtype=np.dtype("M8[ns]"))
96
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
97
+ with pytest.raises(TypeError, match=msg):
98
+ DatetimeArray(arr.tz_localize(None), dtype=arr.dtype)
99
+
100
+ def test_non_array_raises(self):
101
+ depr_msg = "DatetimeArray.__init__ is deprecated"
102
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
103
+ with pytest.raises(ValueError, match="list"):
104
+ DatetimeArray([1, 2, 3])
105
+
106
+ def test_bool_dtype_raises(self):
107
+ arr = np.array([1, 2, 3], dtype="bool")
108
+
109
+ depr_msg = "DatetimeArray.__init__ is deprecated"
110
+ msg = "Unexpected value for 'dtype': 'bool'. Must be"
111
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
112
+ with pytest.raises(ValueError, match=msg):
113
+ DatetimeArray(arr)
114
+
115
+ msg = r"dtype bool cannot be converted to datetime64\[ns\]"
116
+ with pytest.raises(TypeError, match=msg):
117
+ DatetimeArray._from_sequence(arr, dtype="M8[ns]")
118
+
119
+ with pytest.raises(TypeError, match=msg):
120
+ pd.DatetimeIndex(arr)
121
+
122
+ with pytest.raises(TypeError, match=msg):
123
+ pd.to_datetime(arr)
124
+
125
+ def test_incorrect_dtype_raises(self):
126
+ depr_msg = "DatetimeArray.__init__ is deprecated"
127
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
128
+ with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
129
+ DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
130
+
131
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
132
+ with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
133
+ DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="m8[s]")
134
+
135
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
136
+ with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
137
+ DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="M8[D]")
138
+
139
+ def test_mismatched_values_dtype_units(self):
140
+ arr = np.array([1, 2, 3], dtype="M8[s]")
141
+ dtype = np.dtype("M8[ns]")
142
+ msg = "Values resolution does not match dtype."
143
+ depr_msg = "DatetimeArray.__init__ is deprecated"
144
+
145
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
146
+ with pytest.raises(ValueError, match=msg):
147
+ DatetimeArray(arr, dtype=dtype)
148
+
149
+ dtype2 = DatetimeTZDtype(tz="UTC", unit="ns")
150
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
151
+ with pytest.raises(ValueError, match=msg):
152
+ DatetimeArray(arr, dtype=dtype2)
153
+
154
+ def test_freq_infer_raises(self):
155
+ depr_msg = "DatetimeArray.__init__ is deprecated"
156
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
157
+ with pytest.raises(ValueError, match="Frequency inference"):
158
+ DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer")
159
+
160
+ def test_copy(self):
161
+ data = np.array([1, 2, 3], dtype="M8[ns]")
162
+ arr = DatetimeArray._from_sequence(data, copy=False)
163
+ assert arr._ndarray is data
164
+
165
+ arr = DatetimeArray._from_sequence(data, copy=True)
166
+ assert arr._ndarray is not data
167
+
168
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
169
+ def test_numpy_datetime_unit(self, unit):
170
+ data = np.array([1, 2, 3], dtype=f"M8[{unit}]")
171
+ arr = DatetimeArray._from_sequence(data)
172
+ assert arr.unit == unit
173
+ assert arr[0].unit == unit
174
+
175
+
176
+ class TestSequenceToDT64NS:
177
+ def test_tz_dtype_mismatch_raises(self):
178
+ arr = DatetimeArray._from_sequence(
179
+ ["2000"], dtype=DatetimeTZDtype(tz="US/Central")
180
+ )
181
+ with pytest.raises(TypeError, match="data is already tz-aware"):
182
+ DatetimeArray._from_sequence(arr, dtype=DatetimeTZDtype(tz="UTC"))
183
+
184
+ def test_tz_dtype_matches(self):
185
+ dtype = DatetimeTZDtype(tz="US/Central")
186
+ arr = DatetimeArray._from_sequence(["2000"], dtype=dtype)
187
+ result = DatetimeArray._from_sequence(arr, dtype=dtype)
188
+ tm.assert_equal(arr, result)
189
+
190
+ @pytest.mark.parametrize("order", ["F", "C"])
191
+ def test_2d(self, order):
192
+ dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
193
+ arr = np.array(dti, dtype=object).reshape(3, 2)
194
+ if order == "F":
195
+ arr = arr.T
196
+
197
+ res = DatetimeArray._from_sequence(arr, dtype=dti.dtype)
198
+ expected = DatetimeArray._from_sequence(arr.ravel(), dtype=dti.dtype).reshape(
199
+ arr.shape
200
+ )
201
+ tm.assert_datetime_array_equal(res, expected)
202
+
203
+
204
+ # ----------------------------------------------------------------------------
205
+ # Arrow interaction
206
+
207
+
208
+ EXTREME_VALUES = [0, 123456789, None, iNaT, 2**63 - 1, -(2**63) + 1]
209
+ FINE_TO_COARSE_SAFE = [123_000_000_000, None, -123_000_000_000]
210
+ COARSE_TO_FINE_SAFE = [123, None, -123]
211
+
212
+
213
+ @pytest.mark.parametrize(
214
+ ("pa_unit", "pd_unit", "pa_tz", "pd_tz", "data"),
215
+ [
216
+ ("s", "s", "UTC", "UTC", EXTREME_VALUES),
217
+ ("ms", "ms", "UTC", "Europe/Berlin", EXTREME_VALUES),
218
+ ("us", "us", "US/Eastern", "UTC", EXTREME_VALUES),
219
+ ("ns", "ns", "US/Central", "Asia/Kolkata", EXTREME_VALUES),
220
+ ("ns", "s", "UTC", "UTC", FINE_TO_COARSE_SAFE),
221
+ ("us", "ms", "UTC", "Europe/Berlin", FINE_TO_COARSE_SAFE),
222
+ ("ms", "us", "US/Eastern", "UTC", COARSE_TO_FINE_SAFE),
223
+ ("s", "ns", "US/Central", "Asia/Kolkata", COARSE_TO_FINE_SAFE),
224
+ ],
225
+ )
226
+ def test_from_arrow_with_different_units_and_timezones_with(
227
+ pa_unit, pd_unit, pa_tz, pd_tz, data
228
+ ):
229
+ pa = pytest.importorskip("pyarrow")
230
+
231
+ pa_type = pa.timestamp(pa_unit, tz=pa_tz)
232
+ arr = pa.array(data, type=pa_type)
233
+ dtype = DatetimeTZDtype(unit=pd_unit, tz=pd_tz)
234
+
235
+ result = dtype.__from_arrow__(arr)
236
+ expected = DatetimeArray._from_sequence(data, dtype=f"M8[{pa_unit}, UTC]").astype(
237
+ dtype, copy=False
238
+ )
239
+ tm.assert_extension_array_equal(result, expected)
240
+
241
+ result = dtype.__from_arrow__(pa.chunked_array([arr]))
242
+ tm.assert_extension_array_equal(result, expected)
243
+
244
+
245
+ @pytest.mark.parametrize(
246
+ ("unit", "tz"),
247
+ [
248
+ ("s", "UTC"),
249
+ ("ms", "Europe/Berlin"),
250
+ ("us", "US/Eastern"),
251
+ ("ns", "Asia/Kolkata"),
252
+ ("ns", "UTC"),
253
+ ],
254
+ )
255
+ def test_from_arrow_from_empty(unit, tz):
256
+ pa = pytest.importorskip("pyarrow")
257
+
258
+ data = []
259
+ arr = pa.array(data)
260
+ dtype = DatetimeTZDtype(unit=unit, tz=tz)
261
+
262
+ result = dtype.__from_arrow__(arr)
263
+ expected = DatetimeArray._from_sequence(np.array(data, dtype=f"datetime64[{unit}]"))
264
+ expected = expected.tz_localize(tz=tz)
265
+ tm.assert_extension_array_equal(result, expected)
266
+
267
+ result = dtype.__from_arrow__(pa.chunked_array([arr]))
268
+ tm.assert_extension_array_equal(result, expected)
269
+
270
+
271
+ def test_from_arrow_from_integers():
272
+ pa = pytest.importorskip("pyarrow")
273
+
274
+ data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789]
275
+ arr = pa.array(data)
276
+ dtype = DatetimeTZDtype(unit="ns", tz="UTC")
277
+
278
+ result = dtype.__from_arrow__(arr)
279
+ expected = DatetimeArray._from_sequence(np.array(data, dtype="datetime64[ns]"))
280
+ expected = expected.tz_localize("UTC")
281
+ tm.assert_extension_array_equal(result, expected)
282
+
283
+ result = dtype.__from_arrow__(pa.chunked_array([arr]))
284
+ tm.assert_extension_array_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_reductions.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
5
+
6
+ import pandas as pd
7
+ from pandas import NaT
8
+ import pandas._testing as tm
9
+ from pandas.core.arrays import DatetimeArray
10
+
11
+
12
+ class TestReductions:
13
+ @pytest.fixture(params=["s", "ms", "us", "ns"])
14
+ def unit(self, request):
15
+ return request.param
16
+
17
+ @pytest.fixture
18
+ def arr1d(self, tz_naive_fixture):
19
+ """Fixture returning DatetimeArray with parametrized timezones"""
20
+ tz = tz_naive_fixture
21
+ dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]")
22
+ arr = DatetimeArray._from_sequence(
23
+ [
24
+ "2000-01-03",
25
+ "2000-01-03",
26
+ "NaT",
27
+ "2000-01-02",
28
+ "2000-01-05",
29
+ "2000-01-04",
30
+ ],
31
+ dtype=dtype,
32
+ )
33
+ return arr
34
+
35
+ def test_min_max(self, arr1d, unit):
36
+ arr = arr1d
37
+ arr = arr.as_unit(unit)
38
+ tz = arr.tz
39
+
40
+ result = arr.min()
41
+ expected = pd.Timestamp("2000-01-02", tz=tz).as_unit(unit)
42
+ assert result == expected
43
+ assert result.unit == expected.unit
44
+
45
+ result = arr.max()
46
+ expected = pd.Timestamp("2000-01-05", tz=tz).as_unit(unit)
47
+ assert result == expected
48
+ assert result.unit == expected.unit
49
+
50
+ result = arr.min(skipna=False)
51
+ assert result is NaT
52
+
53
+ result = arr.max(skipna=False)
54
+ assert result is NaT
55
+
56
+ @pytest.mark.parametrize("tz", [None, "US/Central"])
57
+ @pytest.mark.parametrize("skipna", [True, False])
58
+ def test_min_max_empty(self, skipna, tz):
59
+ dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]")
60
+ arr = DatetimeArray._from_sequence([], dtype=dtype)
61
+ result = arr.min(skipna=skipna)
62
+ assert result is NaT
63
+
64
+ result = arr.max(skipna=skipna)
65
+ assert result is NaT
66
+
67
+ @pytest.mark.parametrize("tz", [None, "US/Central"])
68
+ @pytest.mark.parametrize("skipna", [True, False])
69
+ def test_median_empty(self, skipna, tz):
70
+ dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]")
71
+ arr = DatetimeArray._from_sequence([], dtype=dtype)
72
+ result = arr.median(skipna=skipna)
73
+ assert result is NaT
74
+
75
+ arr = arr.reshape(0, 3)
76
+ result = arr.median(axis=0, skipna=skipna)
77
+ expected = type(arr)._from_sequence([NaT, NaT, NaT], dtype=arr.dtype)
78
+ tm.assert_equal(result, expected)
79
+
80
+ result = arr.median(axis=1, skipna=skipna)
81
+ expected = type(arr)._from_sequence([], dtype=arr.dtype)
82
+ tm.assert_equal(result, expected)
83
+
84
+ def test_median(self, arr1d):
85
+ arr = arr1d
86
+
87
+ result = arr.median()
88
+ assert result == arr[0]
89
+ result = arr.median(skipna=False)
90
+ assert result is NaT
91
+
92
+ result = arr.dropna().median(skipna=False)
93
+ assert result == arr[0]
94
+
95
+ result = arr.median(axis=0)
96
+ assert result == arr[0]
97
+
98
+ def test_median_axis(self, arr1d):
99
+ arr = arr1d
100
+ assert arr.median(axis=0) == arr.median()
101
+ assert arr.median(axis=0, skipna=False) is NaT
102
+
103
+ msg = r"abs\(axis\) must be less than ndim"
104
+ with pytest.raises(ValueError, match=msg):
105
+ arr.median(axis=1)
106
+
107
+ @pytest.mark.filterwarnings("ignore:All-NaN slice encountered:RuntimeWarning")
108
+ def test_median_2d(self, arr1d):
109
+ arr = arr1d.reshape(1, -1)
110
+
111
+ # axis = None
112
+ assert arr.median() == arr1d.median()
113
+ assert arr.median(skipna=False) is NaT
114
+
115
+ # axis = 0
116
+ result = arr.median(axis=0)
117
+ expected = arr1d
118
+ tm.assert_equal(result, expected)
119
+
120
+ # Since column 3 is all-NaT, we get NaT there with or without skipna
121
+ result = arr.median(axis=0, skipna=False)
122
+ expected = arr1d
123
+ tm.assert_equal(result, expected)
124
+
125
+ # axis = 1
126
+ result = arr.median(axis=1)
127
+ expected = type(arr)._from_sequence([arr1d.median()], dtype=arr.dtype)
128
+ tm.assert_equal(result, expected)
129
+
130
+ result = arr.median(axis=1, skipna=False)
131
+ expected = type(arr)._from_sequence([NaT], dtype=arr.dtype)
132
+ tm.assert_equal(result, expected)
133
+
134
+ def test_mean(self, arr1d):
135
+ arr = arr1d
136
+
137
+ # manually verified result
138
+ expected = arr[0] + 0.4 * pd.Timedelta(days=1)
139
+
140
+ result = arr.mean()
141
+ assert result == expected
142
+ result = arr.mean(skipna=False)
143
+ assert result is NaT
144
+
145
+ result = arr.dropna().mean(skipna=False)
146
+ assert result == expected
147
+
148
+ result = arr.mean(axis=0)
149
+ assert result == expected
150
+
151
+ def test_mean_2d(self):
152
+ dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
153
+ dta = dti._data.reshape(3, 2)
154
+
155
+ result = dta.mean(axis=0)
156
+ expected = dta[1]
157
+ tm.assert_datetime_array_equal(result, expected)
158
+
159
+ result = dta.mean(axis=1)
160
+ expected = dta[:, 0] + pd.Timedelta(hours=12)
161
+ tm.assert_datetime_array_equal(result, expected)
162
+
163
+ result = dta.mean(axis=None)
164
+ expected = dti.mean()
165
+ assert result == expected
166
+
167
+ @pytest.mark.parametrize("skipna", [True, False])
168
+ def test_mean_empty(self, arr1d, skipna):
169
+ arr = arr1d[:0]
170
+
171
+ assert arr.mean(skipna=skipna) is NaT
172
+
173
+ arr2d = arr.reshape(0, 3)
174
+ result = arr2d.mean(axis=0, skipna=skipna)
175
+ expected = DatetimeArray._from_sequence([NaT, NaT, NaT], dtype=arr.dtype)
176
+ tm.assert_datetime_array_equal(result, expected)
177
+
178
+ result = arr2d.mean(axis=1, skipna=skipna)
179
+ expected = arr # i.e. 1D, empty
180
+ tm.assert_datetime_array_equal(result, expected)
181
+
182
+ result = arr2d.mean(axis=None, skipna=skipna)
183
+ assert result is NaT
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_arithmetic.cpython-310.pyc ADDED
Binary file (6.47 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_arrow_compat.cpython-310.pyc ADDED
Binary file (6.34 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_function.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/__pycache__/test_indexing.cpython-310.pyc ADDED
Binary file (2.06 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/masked/test_arithmetic.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ import pandas._testing as tm
10
+
11
+ # integer dtypes
12
+ arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES]
13
+ scalars: list[Any] = [2] * len(arrays)
14
+ # floating dtypes
15
+ arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]
16
+ scalars += [0.2, 0.2]
17
+ # boolean
18
+ arrays += [pd.array([True, False, True, None], dtype="boolean")]
19
+ scalars += [False]
20
+
21
+
22
+ @pytest.fixture(params=zip(arrays, scalars), ids=[a.dtype.name for a in arrays])
23
+ def data(request):
24
+ """Fixture returning parametrized (array, scalar) tuple.
25
+
26
+ Used to test equivalence of scalars, numpy arrays with array ops, and the
27
+ equivalence of DataFrame and Series ops.
28
+ """
29
+ return request.param
30
+
31
+
32
+ def check_skip(data, op_name):
33
+ if isinstance(data.dtype, pd.BooleanDtype) and "sub" in op_name:
34
+ pytest.skip("subtract not implemented for boolean")
35
+
36
+
37
+ def is_bool_not_implemented(data, op_name):
38
+ # match non-masked behavior
39
+ return data.dtype.kind == "b" and op_name.strip("_").lstrip("r") in [
40
+ "pow",
41
+ "truediv",
42
+ "floordiv",
43
+ ]
44
+
45
+
46
+ # Test equivalence of scalars, numpy arrays with array ops
47
+ # -----------------------------------------------------------------------------
48
+
49
+
50
+ def test_array_scalar_like_equivalence(data, all_arithmetic_operators):
51
+ data, scalar = data
52
+ op = tm.get_op_from_name(all_arithmetic_operators)
53
+ check_skip(data, all_arithmetic_operators)
54
+
55
+ scalar_array = pd.array([scalar] * len(data), dtype=data.dtype)
56
+
57
+ # TODO also add len-1 array (np.array([scalar], dtype=data.dtype.numpy_dtype))
58
+ for scalar in [scalar, data.dtype.type(scalar)]:
59
+ if is_bool_not_implemented(data, all_arithmetic_operators):
60
+ msg = "operator '.*' not implemented for bool dtypes"
61
+ with pytest.raises(NotImplementedError, match=msg):
62
+ op(data, scalar)
63
+ with pytest.raises(NotImplementedError, match=msg):
64
+ op(data, scalar_array)
65
+ else:
66
+ result = op(data, scalar)
67
+ expected = op(data, scalar_array)
68
+ tm.assert_extension_array_equal(result, expected)
69
+
70
+
71
+ def test_array_NA(data, all_arithmetic_operators):
72
+ data, _ = data
73
+ op = tm.get_op_from_name(all_arithmetic_operators)
74
+ check_skip(data, all_arithmetic_operators)
75
+
76
+ scalar = pd.NA
77
+ scalar_array = pd.array([pd.NA] * len(data), dtype=data.dtype)
78
+
79
+ mask = data._mask.copy()
80
+
81
+ if is_bool_not_implemented(data, all_arithmetic_operators):
82
+ msg = "operator '.*' not implemented for bool dtypes"
83
+ with pytest.raises(NotImplementedError, match=msg):
84
+ op(data, scalar)
85
+ # GH#45421 check op doesn't alter data._mask inplace
86
+ tm.assert_numpy_array_equal(mask, data._mask)
87
+ return
88
+
89
+ result = op(data, scalar)
90
+ # GH#45421 check op doesn't alter data._mask inplace
91
+ tm.assert_numpy_array_equal(mask, data._mask)
92
+
93
+ expected = op(data, scalar_array)
94
+ tm.assert_numpy_array_equal(mask, data._mask)
95
+
96
+ tm.assert_extension_array_equal(result, expected)
97
+
98
+
99
+ def test_numpy_array_equivalence(data, all_arithmetic_operators):
100
+ data, scalar = data
101
+ op = tm.get_op_from_name(all_arithmetic_operators)
102
+ check_skip(data, all_arithmetic_operators)
103
+
104
+ numpy_array = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype)
105
+ pd_array = pd.array(numpy_array, dtype=data.dtype)
106
+
107
+ if is_bool_not_implemented(data, all_arithmetic_operators):
108
+ msg = "operator '.*' not implemented for bool dtypes"
109
+ with pytest.raises(NotImplementedError, match=msg):
110
+ op(data, numpy_array)
111
+ with pytest.raises(NotImplementedError, match=msg):
112
+ op(data, pd_array)
113
+ return
114
+
115
+ result = op(data, numpy_array)
116
+ expected = op(data, pd_array)
117
+ tm.assert_extension_array_equal(result, expected)
118
+
119
+
120
+ # Test equivalence with Series and DataFrame ops
121
+ # -----------------------------------------------------------------------------
122
+
123
+
124
+ def test_frame(data, all_arithmetic_operators):
125
+ data, scalar = data
126
+ op = tm.get_op_from_name(all_arithmetic_operators)
127
+ check_skip(data, all_arithmetic_operators)
128
+
129
+ # DataFrame with scalar
130
+ df = pd.DataFrame({"A": data})
131
+
132
+ if is_bool_not_implemented(data, all_arithmetic_operators):
133
+ msg = "operator '.*' not implemented for bool dtypes"
134
+ with pytest.raises(NotImplementedError, match=msg):
135
+ op(df, scalar)
136
+ with pytest.raises(NotImplementedError, match=msg):
137
+ op(data, scalar)
138
+ return
139
+
140
+ result = op(df, scalar)
141
+ expected = pd.DataFrame({"A": op(data, scalar)})
142
+ tm.assert_frame_equal(result, expected)
143
+
144
+
145
+ def test_series(data, all_arithmetic_operators):
146
+ data, scalar = data
147
+ op = tm.get_op_from_name(all_arithmetic_operators)
148
+ check_skip(data, all_arithmetic_operators)
149
+
150
+ ser = pd.Series(data)
151
+
152
+ others = [
153
+ scalar,
154
+ np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype),
155
+ pd.array([scalar] * len(data), dtype=data.dtype),
156
+ pd.Series([scalar] * len(data), dtype=data.dtype),
157
+ ]
158
+
159
+ for other in others:
160
+ if is_bool_not_implemented(data, all_arithmetic_operators):
161
+ msg = "operator '.*' not implemented for bool dtypes"
162
+ with pytest.raises(NotImplementedError, match=msg):
163
+ op(ser, other)
164
+
165
+ else:
166
+ result = op(ser, other)
167
+ expected = pd.Series(op(data, other))
168
+ tm.assert_series_equal(result, expected)
169
+
170
+
171
+ # Test generic characteristics / errors
172
+ # -----------------------------------------------------------------------------
173
+
174
+
175
+ def test_error_invalid_object(data, all_arithmetic_operators):
176
+ data, _ = data
177
+
178
+ op = all_arithmetic_operators
179
+ opa = getattr(data, op)
180
+
181
+ # 2d -> return NotImplemented
182
+ result = opa(pd.DataFrame({"A": data}))
183
+ assert result is NotImplemented
184
+
185
+ msg = r"can only perform ops with 1-d structures"
186
+ with pytest.raises(NotImplementedError, match=msg):
187
+ opa(np.arange(len(data)).reshape(-1, len(data)))
188
+
189
+
190
+ def test_error_len_mismatch(data, all_arithmetic_operators):
191
+ # operating with a list-like with non-matching length raises
192
+ data, scalar = data
193
+ op = tm.get_op_from_name(all_arithmetic_operators)
194
+
195
+ other = [scalar] * (len(data) - 1)
196
+
197
+ err = ValueError
198
+ msg = "|".join(
199
+ [
200
+ r"operands could not be broadcast together with shapes \(3,\) \(4,\)",
201
+ r"operands could not be broadcast together with shapes \(4,\) \(3,\)",
202
+ ]
203
+ )
204
+ if data.dtype.kind == "b" and all_arithmetic_operators.strip("_") in [
205
+ "sub",
206
+ "rsub",
207
+ ]:
208
+ err = TypeError
209
+ msg = (
210
+ r"numpy boolean subtract, the `\-` operator, is not supported, use "
211
+ r"the bitwise_xor, the `\^` operator, or the logical_xor function instead"
212
+ )
213
+ elif is_bool_not_implemented(data, all_arithmetic_operators):
214
+ msg = "operator '.*' not implemented for bool dtypes"
215
+ err = NotImplementedError
216
+
217
+ for other in [other, np.array(other)]:
218
+ with pytest.raises(err, match=msg):
219
+ op(data, other)
220
+
221
+ s = pd.Series(data)
222
+ with pytest.raises(err, match=msg):
223
+ op(s, other)
224
+
225
+
226
+ @pytest.mark.parametrize("op", ["__neg__", "__abs__", "__invert__"])
227
+ def test_unary_op_does_not_propagate_mask(data, op):
228
+ # https://github.com/pandas-dev/pandas/issues/39943
229
+ data, _ = data
230
+ ser = pd.Series(data)
231
+
232
+ if op == "__invert__" and data.dtype.kind == "f":
233
+ # we follow numpy in raising
234
+ msg = "ufunc 'invert' not supported for the input types"
235
+ with pytest.raises(TypeError, match=msg):
236
+ getattr(ser, op)()
237
+ with pytest.raises(TypeError, match=msg):
238
+ getattr(data, op)()
239
+ with pytest.raises(TypeError, match=msg):
240
+ # Check that this is still the numpy behavior
241
+ getattr(data._data, op)()
242
+
243
+ return
244
+
245
+ result = getattr(ser, op)()
246
+ expected = result.copy(deep=True)
247
+ ser[0] = None
248
+ tm.assert_series_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_indexing.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_numpy.cpython-310.pyc ADDED
Binary file (8.22 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_indexing.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas.core.dtypes.common import is_scalar
4
+
5
+ import pandas as pd
6
+ import pandas._testing as tm
7
+
8
+
9
+ class TestSearchsorted:
10
+ def test_searchsorted_string(self, string_dtype):
11
+ arr = pd.array(["a", "b", "c"], dtype=string_dtype)
12
+
13
+ result = arr.searchsorted("a", side="left")
14
+ assert is_scalar(result)
15
+ assert result == 0
16
+
17
+ result = arr.searchsorted("a", side="right")
18
+ assert is_scalar(result)
19
+ assert result == 1
20
+
21
+ def test_searchsorted_numeric_dtypes_scalar(self, any_real_numpy_dtype):
22
+ arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype)
23
+ result = arr.searchsorted(30)
24
+ assert is_scalar(result)
25
+ assert result == 2
26
+
27
+ result = arr.searchsorted([30])
28
+ expected = np.array([2], dtype=np.intp)
29
+ tm.assert_numpy_array_equal(result, expected)
30
+
31
+ def test_searchsorted_numeric_dtypes_vector(self, any_real_numpy_dtype):
32
+ arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype)
33
+ result = arr.searchsorted([2, 30])
34
+ expected = np.array([1, 2], dtype=np.intp)
35
+ tm.assert_numpy_array_equal(result, expected)
36
+
37
+ def test_searchsorted_sorter(self, any_real_numpy_dtype):
38
+ arr = pd.array([3, 1, 2], dtype=any_real_numpy_dtype)
39
+ result = arr.searchsorted([0, 3], sorter=np.argsort(arr))
40
+ expected = np.array([0, 2], dtype=np.intp)
41
+ tm.assert_numpy_array_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_numpy.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Additional tests for NumpyExtensionArray that aren't covered by
3
+ the interface tests.
4
+ """
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas.core.dtypes.dtypes import NumpyEADtype
9
+
10
+ import pandas as pd
11
+ import pandas._testing as tm
12
+ from pandas.arrays import NumpyExtensionArray
13
+
14
+
15
+ @pytest.fixture(
16
+ params=[
17
+ np.array(["a", "b"], dtype=object),
18
+ np.array([0, 1], dtype=float),
19
+ np.array([0, 1], dtype=int),
20
+ np.array([0, 1 + 2j], dtype=complex),
21
+ np.array([True, False], dtype=bool),
22
+ np.array([0, 1], dtype="datetime64[ns]"),
23
+ np.array([0, 1], dtype="timedelta64[ns]"),
24
+ ]
25
+ )
26
+ def any_numpy_array(request):
27
+ """
28
+ Parametrized fixture for NumPy arrays with different dtypes.
29
+
30
+ This excludes string and bytes.
31
+ """
32
+ return request.param
33
+
34
+
35
+ # ----------------------------------------------------------------------------
36
+ # NumpyEADtype
37
+
38
+
39
+ @pytest.mark.parametrize(
40
+ "dtype, expected",
41
+ [
42
+ ("bool", True),
43
+ ("int", True),
44
+ ("uint", True),
45
+ ("float", True),
46
+ ("complex", True),
47
+ ("str", False),
48
+ ("bytes", False),
49
+ ("datetime64[ns]", False),
50
+ ("object", False),
51
+ ("void", False),
52
+ ],
53
+ )
54
+ def test_is_numeric(dtype, expected):
55
+ dtype = NumpyEADtype(dtype)
56
+ assert dtype._is_numeric is expected
57
+
58
+
59
+ @pytest.mark.parametrize(
60
+ "dtype, expected",
61
+ [
62
+ ("bool", True),
63
+ ("int", False),
64
+ ("uint", False),
65
+ ("float", False),
66
+ ("complex", False),
67
+ ("str", False),
68
+ ("bytes", False),
69
+ ("datetime64[ns]", False),
70
+ ("object", False),
71
+ ("void", False),
72
+ ],
73
+ )
74
+ def test_is_boolean(dtype, expected):
75
+ dtype = NumpyEADtype(dtype)
76
+ assert dtype._is_boolean is expected
77
+
78
+
79
+ def test_repr():
80
+ dtype = NumpyEADtype(np.dtype("int64"))
81
+ assert repr(dtype) == "NumpyEADtype('int64')"
82
+
83
+
84
+ def test_constructor_from_string():
85
+ result = NumpyEADtype.construct_from_string("int64")
86
+ expected = NumpyEADtype(np.dtype("int64"))
87
+ assert result == expected
88
+
89
+
90
+ def test_dtype_idempotent(any_numpy_dtype):
91
+ dtype = NumpyEADtype(any_numpy_dtype)
92
+
93
+ result = NumpyEADtype(dtype)
94
+ assert result == dtype
95
+
96
+
97
+ # ----------------------------------------------------------------------------
98
+ # Construction
99
+
100
+
101
+ def test_constructor_no_coercion():
102
+ with pytest.raises(ValueError, match="NumPy array"):
103
+ NumpyExtensionArray([1, 2, 3])
104
+
105
+
106
+ def test_series_constructor_with_copy():
107
+ ndarray = np.array([1, 2, 3])
108
+ ser = pd.Series(NumpyExtensionArray(ndarray), copy=True)
109
+
110
+ assert ser.values is not ndarray
111
+
112
+
113
+ def test_series_constructor_with_astype():
114
+ ndarray = np.array([1, 2, 3])
115
+ result = pd.Series(NumpyExtensionArray(ndarray), dtype="float64")
116
+ expected = pd.Series([1.0, 2.0, 3.0], dtype="float64")
117
+ tm.assert_series_equal(result, expected)
118
+
119
+
120
+ def test_from_sequence_dtype():
121
+ arr = np.array([1, 2, 3], dtype="int64")
122
+ result = NumpyExtensionArray._from_sequence(arr, dtype="uint64")
123
+ expected = NumpyExtensionArray(np.array([1, 2, 3], dtype="uint64"))
124
+ tm.assert_extension_array_equal(result, expected)
125
+
126
+
127
+ def test_constructor_copy():
128
+ arr = np.array([0, 1])
129
+ result = NumpyExtensionArray(arr, copy=True)
130
+
131
+ assert not tm.shares_memory(result, arr)
132
+
133
+
134
+ def test_constructor_with_data(any_numpy_array):
135
+ nparr = any_numpy_array
136
+ arr = NumpyExtensionArray(nparr)
137
+ assert arr.dtype.numpy_dtype == nparr.dtype
138
+
139
+
140
+ # ----------------------------------------------------------------------------
141
+ # Conversion
142
+
143
+
144
+ def test_to_numpy():
145
+ arr = NumpyExtensionArray(np.array([1, 2, 3]))
146
+ result = arr.to_numpy()
147
+ assert result is arr._ndarray
148
+
149
+ result = arr.to_numpy(copy=True)
150
+ assert result is not arr._ndarray
151
+
152
+ result = arr.to_numpy(dtype="f8")
153
+ expected = np.array([1, 2, 3], dtype="f8")
154
+ tm.assert_numpy_array_equal(result, expected)
155
+
156
+
157
+ # ----------------------------------------------------------------------------
158
+ # Setitem
159
+
160
+
161
+ def test_setitem_series():
162
+ ser = pd.Series([1, 2, 3])
163
+ ser.array[0] = 10
164
+ expected = pd.Series([10, 2, 3])
165
+ tm.assert_series_equal(ser, expected)
166
+
167
+
168
+ def test_setitem(any_numpy_array):
169
+ nparr = any_numpy_array
170
+ arr = NumpyExtensionArray(nparr, copy=True)
171
+
172
+ arr[0] = arr[1]
173
+ nparr[0] = nparr[1]
174
+
175
+ tm.assert_numpy_array_equal(arr.to_numpy(), nparr)
176
+
177
+
178
+ # ----------------------------------------------------------------------------
179
+ # Reductions
180
+
181
+
182
+ def test_bad_reduce_raises():
183
+ arr = np.array([1, 2, 3], dtype="int64")
184
+ arr = NumpyExtensionArray(arr)
185
+ msg = "cannot perform not_a_method with type int"
186
+ with pytest.raises(TypeError, match=msg):
187
+ arr._reduce(msg)
188
+
189
+
190
+ def test_validate_reduction_keyword_args():
191
+ arr = NumpyExtensionArray(np.array([1, 2, 3]))
192
+ msg = "the 'keepdims' parameter is not supported .*all"
193
+ with pytest.raises(ValueError, match=msg):
194
+ arr.all(keepdims=True)
195
+
196
+
197
+ def test_np_max_nested_tuples():
198
+ # case where checking in ufunc.nout works while checking for tuples
199
+ # does not
200
+ vals = [
201
+ (("j", "k"), ("l", "m")),
202
+ (("l", "m"), ("o", "p")),
203
+ (("o", "p"), ("j", "k")),
204
+ ]
205
+ ser = pd.Series(vals)
206
+ arr = ser.array
207
+
208
+ assert arr.max() is arr[2]
209
+ assert ser.max() is arr[2]
210
+
211
+ result = np.maximum.reduce(arr)
212
+ assert result == arr[2]
213
+
214
+ result = np.maximum.reduce(ser)
215
+ assert result == arr[2]
216
+
217
+
218
+ def test_np_reduce_2d():
219
+ raw = np.arange(12).reshape(4, 3)
220
+ arr = NumpyExtensionArray(raw)
221
+
222
+ res = np.maximum.reduce(arr, axis=0)
223
+ tm.assert_extension_array_equal(res, arr[-1])
224
+
225
+ alt = arr.max(axis=0)
226
+ tm.assert_extension_array_equal(alt, arr[-1])
227
+
228
+
229
+ # ----------------------------------------------------------------------------
230
+ # Ops
231
+
232
+
233
+ @pytest.mark.parametrize("ufunc", [np.abs, np.negative, np.positive])
234
+ def test_ufunc_unary(ufunc):
235
+ arr = NumpyExtensionArray(np.array([-1.0, 0.0, 1.0]))
236
+ result = ufunc(arr)
237
+ expected = NumpyExtensionArray(ufunc(arr._ndarray))
238
+ tm.assert_extension_array_equal(result, expected)
239
+
240
+ # same thing but with the 'out' keyword
241
+ out = NumpyExtensionArray(np.array([-9.0, -9.0, -9.0]))
242
+ ufunc(arr, out=out)
243
+ tm.assert_extension_array_equal(out, expected)
244
+
245
+
246
+ def test_ufunc():
247
+ arr = NumpyExtensionArray(np.array([-1.0, 0.0, 1.0]))
248
+
249
+ r1, r2 = np.divmod(arr, np.add(arr, 2))
250
+ e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2))
251
+ e1 = NumpyExtensionArray(e1)
252
+ e2 = NumpyExtensionArray(e2)
253
+ tm.assert_extension_array_equal(r1, e1)
254
+ tm.assert_extension_array_equal(r2, e2)
255
+
256
+
257
+ def test_basic_binop():
258
+ # Just a basic smoke test. The EA interface tests exercise this
259
+ # more thoroughly.
260
+ x = NumpyExtensionArray(np.array([1, 2, 3]))
261
+ result = x + x
262
+ expected = NumpyExtensionArray(np.array([2, 4, 6]))
263
+ tm.assert_extension_array_equal(result, expected)
264
+
265
+
266
+ @pytest.mark.parametrize("dtype", [None, object])
267
+ def test_setitem_object_typecode(dtype):
268
+ arr = NumpyExtensionArray(np.array(["a", "b", "c"], dtype=dtype))
269
+ arr[0] = "t"
270
+ expected = NumpyExtensionArray(np.array(["t", "b", "c"], dtype=dtype))
271
+ tm.assert_extension_array_equal(arr, expected)
272
+
273
+
274
+ def test_setitem_no_coercion():
275
+ # https://github.com/pandas-dev/pandas/issues/28150
276
+ arr = NumpyExtensionArray(np.array([1, 2, 3]))
277
+ with pytest.raises(ValueError, match="int"):
278
+ arr[0] = "a"
279
+
280
+ # With a value that we do coerce, check that we coerce the value
281
+ # and not the underlying array.
282
+ arr[0] = 2.5
283
+ assert isinstance(arr[0], (int, np.integer)), type(arr[0])
284
+
285
+
286
+ def test_setitem_preserves_views():
287
+ # GH#28150, see also extension test of the same name
288
+ arr = NumpyExtensionArray(np.array([1, 2, 3]))
289
+ view1 = arr.view()
290
+ view2 = arr[:]
291
+ view3 = np.asarray(arr)
292
+
293
+ arr[0] = 9
294
+ assert view1[0] == 9
295
+ assert view2[0] == 9
296
+ assert view3[0] == 9
297
+
298
+ arr[-1] = 2.5
299
+ view1[-1] = 5
300
+ assert arr[-1] == 5
301
+
302
+
303
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64])
304
+ def test_quantile_empty(dtype):
305
+ # we should get back np.nans, not -1s
306
+ arr = NumpyExtensionArray(np.array([], dtype=dtype))
307
+ idx = pd.Index([0.0, 0.5])
308
+
309
+ result = arr._quantile(idx, interpolation="linear")
310
+ expected = NumpyExtensionArray(np.array([np.nan, np.nan]))
311
+ tm.assert_extension_array_equal(result, expected)
312
+
313
+
314
+ def test_factorize_unsigned():
315
+ # don't raise when calling factorize on unsigned int NumpyExtensionArray
316
+ arr = np.array([1, 2, 3], dtype=np.uint64)
317
+ obj = NumpyExtensionArray(arr)
318
+
319
+ res_codes, res_unique = obj.factorize()
320
+ exp_codes, exp_unique = pd.factorize(arr)
321
+
322
+ tm.assert_numpy_array_equal(res_codes, exp_codes)
323
+
324
+ tm.assert_extension_array_equal(res_unique, NumpyExtensionArray(exp_unique))
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_arrow_compat.cpython-310.pyc ADDED
Binary file (3.67 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_astype.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_constructors.cpython-310.pyc ADDED
Binary file (5.36 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_reductions.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_arrow_compat.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas.compat.pyarrow import pa_version_under10p1
4
+
5
+ from pandas.core.dtypes.dtypes import PeriodDtype
6
+
7
+ import pandas as pd
8
+ import pandas._testing as tm
9
+ from pandas.core.arrays import (
10
+ PeriodArray,
11
+ period_array,
12
+ )
13
+
14
+ pytestmark = pytest.mark.filterwarnings(
15
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
16
+ )
17
+
18
+
19
+ pa = pytest.importorskip("pyarrow")
20
+
21
+
22
+ def test_arrow_extension_type():
23
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
24
+
25
+ p1 = ArrowPeriodType("D")
26
+ p2 = ArrowPeriodType("D")
27
+ p3 = ArrowPeriodType("M")
28
+
29
+ assert p1.freq == "D"
30
+ assert p1 == p2
31
+ assert p1 != p3
32
+ assert hash(p1) == hash(p2)
33
+ assert hash(p1) != hash(p3)
34
+
35
+
36
+ @pytest.mark.xfail(not pa_version_under10p1, reason="Wrong behavior with pyarrow 10")
37
+ @pytest.mark.parametrize(
38
+ "data, freq",
39
+ [
40
+ (pd.date_range("2017", periods=3), "D"),
41
+ (pd.date_range("2017", periods=3, freq="YE"), "Y-DEC"),
42
+ ],
43
+ )
44
+ def test_arrow_array(data, freq):
45
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
46
+
47
+ periods = period_array(data, freq=freq)
48
+ result = pa.array(periods)
49
+ assert isinstance(result.type, ArrowPeriodType)
50
+ assert result.type.freq == freq
51
+ expected = pa.array(periods.asi8, type="int64")
52
+ assert result.storage.equals(expected)
53
+
54
+ # convert to its storage type
55
+ result = pa.array(periods, type=pa.int64())
56
+ assert result.equals(expected)
57
+
58
+ # unsupported conversions
59
+ msg = "Not supported to convert PeriodArray to 'double' type"
60
+ with pytest.raises(TypeError, match=msg):
61
+ pa.array(periods, type="float64")
62
+
63
+ with pytest.raises(TypeError, match="different 'freq'"):
64
+ pa.array(periods, type=ArrowPeriodType("T"))
65
+
66
+
67
+ def test_arrow_array_missing():
68
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
69
+
70
+ arr = PeriodArray([1, 2, 3], dtype="period[D]")
71
+ arr[1] = pd.NaT
72
+
73
+ result = pa.array(arr)
74
+ assert isinstance(result.type, ArrowPeriodType)
75
+ assert result.type.freq == "D"
76
+ expected = pa.array([1, None, 3], type="int64")
77
+ assert result.storage.equals(expected)
78
+
79
+
80
+ def test_arrow_table_roundtrip():
81
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
82
+
83
+ arr = PeriodArray([1, 2, 3], dtype="period[D]")
84
+ arr[1] = pd.NaT
85
+ df = pd.DataFrame({"a": arr})
86
+
87
+ table = pa.table(df)
88
+ assert isinstance(table.field("a").type, ArrowPeriodType)
89
+ result = table.to_pandas()
90
+ assert isinstance(result["a"].dtype, PeriodDtype)
91
+ tm.assert_frame_equal(result, df)
92
+
93
+ table2 = pa.concat_tables([table, table])
94
+ result = table2.to_pandas()
95
+ expected = pd.concat([df, df], ignore_index=True)
96
+ tm.assert_frame_equal(result, expected)
97
+
98
+
99
+ def test_arrow_load_from_zero_chunks():
100
+ # GH-41040
101
+
102
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
103
+
104
+ arr = PeriodArray([], dtype="period[D]")
105
+ df = pd.DataFrame({"a": arr})
106
+
107
+ table = pa.table(df)
108
+ assert isinstance(table.field("a").type, ArrowPeriodType)
109
+ table = pa.table(
110
+ [pa.chunked_array([], type=table.column(0).type)], schema=table.schema
111
+ )
112
+
113
+ result = table.to_pandas()
114
+ assert isinstance(result["a"].dtype, PeriodDtype)
115
+ tm.assert_frame_equal(result, df)
116
+
117
+
118
+ def test_arrow_table_roundtrip_without_metadata():
119
+ arr = PeriodArray([1, 2, 3], dtype="period[h]")
120
+ arr[1] = pd.NaT
121
+ df = pd.DataFrame({"a": arr})
122
+
123
+ table = pa.table(df)
124
+ # remove the metadata
125
+ table = table.replace_schema_metadata()
126
+ assert table.schema.metadata is None
127
+
128
+ result = table.to_pandas()
129
+ assert isinstance(result["a"].dtype, PeriodDtype)
130
+ tm.assert_frame_equal(result, df)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_astype.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.core.dtypes.dtypes import PeriodDtype
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+ from pandas.core.arrays import period_array
9
+
10
+
11
+ @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
12
+ def test_astype_int(dtype):
13
+ # We choose to ignore the sign and size of integers for
14
+ # Period/Datetime/Timedelta astype
15
+ arr = period_array(["2000", "2001", None], freq="D")
16
+
17
+ if np.dtype(dtype) != np.int64:
18
+ with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
19
+ arr.astype(dtype)
20
+ return
21
+
22
+ result = arr.astype(dtype)
23
+ expected = arr._ndarray.view("i8")
24
+ tm.assert_numpy_array_equal(result, expected)
25
+
26
+
27
+ def test_astype_copies():
28
+ arr = period_array(["2000", "2001", None], freq="D")
29
+ result = arr.astype(np.int64, copy=False)
30
+
31
+ # Add the `.base`, since we now use `.asi8` which returns a view.
32
+ # We could maybe override it in PeriodArray to return ._ndarray directly.
33
+ assert result.base is arr._ndarray
34
+
35
+ result = arr.astype(np.int64, copy=True)
36
+ assert result is not arr._ndarray
37
+ tm.assert_numpy_array_equal(result, arr._ndarray.view("i8"))
38
+
39
+
40
+ def test_astype_categorical():
41
+ arr = period_array(["2000", "2001", "2001", None], freq="D")
42
+ result = arr.astype("category")
43
+ categories = pd.PeriodIndex(["2000", "2001"], freq="D")
44
+ expected = pd.Categorical.from_codes([0, 1, 1, -1], categories=categories)
45
+ tm.assert_categorical_equal(result, expected)
46
+
47
+
48
+ def test_astype_period():
49
+ arr = period_array(["2000", "2001", None], freq="D")
50
+ result = arr.astype(PeriodDtype("M"))
51
+ expected = period_array(["2000", "2001", None], freq="M")
52
+ tm.assert_period_array_equal(result, expected)
53
+
54
+
55
+ @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
56
+ def test_astype_datetime(dtype):
57
+ arr = period_array(["2000", "2001", None], freq="D")
58
+ # slice off the [ns] so that the regex matches.
59
+ if dtype == "timedelta64[ns]":
60
+ with pytest.raises(TypeError, match=dtype[:-4]):
61
+ arr.astype(dtype)
62
+
63
+ else:
64
+ # GH#45038 allow period->dt64 because we allow dt64->period
65
+ result = arr.astype(dtype)
66
+ expected = pd.DatetimeIndex(["2000", "2001", pd.NaT], dtype=dtype)._data
67
+ tm.assert_datetime_array_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_constructors.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs.tslibs import iNaT
5
+ from pandas._libs.tslibs.offsets import MonthEnd
6
+ from pandas._libs.tslibs.period import IncompatibleFrequency
7
+
8
+ import pandas as pd
9
+ import pandas._testing as tm
10
+ from pandas.core.arrays import (
11
+ PeriodArray,
12
+ period_array,
13
+ )
14
+
15
+
16
+ @pytest.mark.parametrize(
17
+ "data, freq, expected",
18
+ [
19
+ ([pd.Period("2017", "D")], None, [17167]),
20
+ ([pd.Period("2017", "D")], "D", [17167]),
21
+ ([2017], "D", [17167]),
22
+ (["2017"], "D", [17167]),
23
+ ([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
24
+ ([pd.Period("2017", "D"), None], None, [17167, iNaT]),
25
+ (pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
26
+ (pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
27
+ (pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
28
+ ],
29
+ )
30
+ def test_period_array_ok(data, freq, expected):
31
+ result = period_array(data, freq=freq).asi8
32
+ expected = np.asarray(expected, dtype=np.int64)
33
+ tm.assert_numpy_array_equal(result, expected)
34
+
35
+
36
+ def test_period_array_readonly_object():
37
+ # https://github.com/pandas-dev/pandas/issues/25403
38
+ pa = period_array([pd.Period("2019-01-01")])
39
+ arr = np.asarray(pa, dtype="object")
40
+ arr.setflags(write=False)
41
+
42
+ result = period_array(arr)
43
+ tm.assert_period_array_equal(result, pa)
44
+
45
+ result = pd.Series(arr)
46
+ tm.assert_series_equal(result, pd.Series(pa))
47
+
48
+ result = pd.DataFrame({"A": arr})
49
+ tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
50
+
51
+
52
+ def test_from_datetime64_freq_changes():
53
+ # https://github.com/pandas-dev/pandas/issues/23438
54
+ arr = pd.date_range("2017", periods=3, freq="D")
55
+ result = PeriodArray._from_datetime64(arr, freq="M")
56
+ expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
57
+ tm.assert_period_array_equal(result, expected)
58
+
59
+
60
+ @pytest.mark.parametrize("freq", ["2M", MonthEnd(2)])
61
+ def test_from_datetime64_freq_2M(freq):
62
+ arr = np.array(
63
+ ["2020-01-01T00:00:00", "2020-01-02T00:00:00"], dtype="datetime64[ns]"
64
+ )
65
+ result = PeriodArray._from_datetime64(arr, freq)
66
+ expected = period_array(["2020-01", "2020-01"], freq=freq)
67
+ tm.assert_period_array_equal(result, expected)
68
+
69
+
70
+ @pytest.mark.parametrize(
71
+ "data, freq, msg",
72
+ [
73
+ (
74
+ [pd.Period("2017", "D"), pd.Period("2017", "Y")],
75
+ None,
76
+ "Input has different freq",
77
+ ),
78
+ ([pd.Period("2017", "D")], "Y", "Input has different freq"),
79
+ ],
80
+ )
81
+ def test_period_array_raises(data, freq, msg):
82
+ with pytest.raises(IncompatibleFrequency, match=msg):
83
+ period_array(data, freq)
84
+
85
+
86
+ def test_period_array_non_period_series_raies():
87
+ ser = pd.Series([1, 2, 3])
88
+ with pytest.raises(TypeError, match="dtype"):
89
+ PeriodArray(ser, dtype="period[D]")
90
+
91
+
92
+ def test_period_array_freq_mismatch():
93
+ arr = period_array(["2000", "2001"], freq="D")
94
+ with pytest.raises(IncompatibleFrequency, match="freq"):
95
+ PeriodArray(arr, dtype="period[M]")
96
+
97
+ dtype = pd.PeriodDtype(pd.tseries.offsets.MonthEnd())
98
+ with pytest.raises(IncompatibleFrequency, match="freq"):
99
+ PeriodArray(arr, dtype=dtype)
100
+
101
+
102
+ def test_from_sequence_disallows_i8():
103
+ arr = period_array(["2000", "2001"], freq="D")
104
+
105
+ msg = str(arr[0].ordinal)
106
+ with pytest.raises(TypeError, match=msg):
107
+ PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype)
108
+
109
+ with pytest.raises(TypeError, match=msg):
110
+ PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)
111
+
112
+
113
+ def test_from_td64nat_sequence_raises():
114
+ # GH#44507
115
+ td = pd.NaT.to_numpy("m8[ns]")
116
+
117
+ dtype = pd.period_range("2005-01-01", periods=3, freq="D").dtype
118
+
119
+ arr = np.array([None], dtype=object)
120
+ arr[0] = td
121
+
122
+ msg = "Value must be Period, string, integer, or datetime"
123
+ with pytest.raises(ValueError, match=msg):
124
+ PeriodArray._from_sequence(arr, dtype=dtype)
125
+
126
+ with pytest.raises(ValueError, match=msg):
127
+ pd.PeriodIndex(arr, dtype=dtype)
128
+ with pytest.raises(ValueError, match=msg):
129
+ pd.Index(arr, dtype=dtype)
130
+ with pytest.raises(ValueError, match=msg):
131
+ pd.array(arr, dtype=dtype)
132
+ with pytest.raises(ValueError, match=msg):
133
+ pd.Series(arr, dtype=dtype)
134
+ with pytest.raises(ValueError, match=msg):
135
+ pd.DataFrame(arr, dtype=dtype)
136
+
137
+
138
+ def test_freq_deprecated():
139
+ # GH#52462
140
+ data = np.arange(5).astype(np.int64)
141
+ msg = "The 'freq' keyword in the PeriodArray constructor is deprecated"
142
+ with tm.assert_produces_warning(FutureWarning, match=msg):
143
+ res = PeriodArray(data, freq="M")
144
+
145
+ expected = PeriodArray(data, dtype="period[M]")
146
+ tm.assert_equal(res, expected)
147
+
148
+
149
+ def test_period_array_from_datetime64():
150
+ arr = np.array(
151
+ ["2020-01-01T00:00:00", "2020-02-02T00:00:00"], dtype="datetime64[ns]"
152
+ )
153
+ result = PeriodArray._from_datetime64(arr, freq=MonthEnd(2))
154
+
155
+ expected = period_array(["2020-01-01", "2020-02-01"], freq=MonthEnd(2))
156
+ tm.assert_period_array_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/period/test_reductions.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import pandas as pd
4
+ from pandas.core.arrays import period_array
5
+
6
+
7
+ class TestReductions:
8
+ def test_min_max(self):
9
+ arr = period_array(
10
+ [
11
+ "2000-01-03",
12
+ "2000-01-03",
13
+ "NaT",
14
+ "2000-01-02",
15
+ "2000-01-05",
16
+ "2000-01-04",
17
+ ],
18
+ freq="D",
19
+ )
20
+
21
+ result = arr.min()
22
+ expected = pd.Period("2000-01-02", freq="D")
23
+ assert result == expected
24
+
25
+ result = arr.max()
26
+ expected = pd.Period("2000-01-05", freq="D")
27
+ assert result == expected
28
+
29
+ result = arr.min(skipna=False)
30
+ assert result is pd.NaT
31
+
32
+ result = arr.max(skipna=False)
33
+ assert result is pd.NaT
34
+
35
+ @pytest.mark.parametrize("skipna", [True, False])
36
+ def test_min_max_empty(self, skipna):
37
+ arr = period_array([], freq="D")
38
+ result = arr.min(skipna=skipna)
39
+ assert result is pd.NaT
40
+
41
+ result = arr.max(skipna=skipna)
42
+ assert result is pd.NaT
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_combine_concat.cpython-310.pyc ADDED
Binary file (2.18 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_libsparse.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_reductions.cpython-310.pyc ADDED
Binary file (8.28 kB). View file
 
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import string
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import SparseDtype
8
+ import pandas._testing as tm
9
+ from pandas.core.arrays.sparse import SparseArray
10
+
11
+
12
+ class TestSeriesAccessor:
13
+ def test_to_dense(self):
14
+ ser = pd.Series([0, 1, 0, 10], dtype="Sparse[int64]")
15
+ result = ser.sparse.to_dense()
16
+ expected = pd.Series([0, 1, 0, 10])
17
+ tm.assert_series_equal(result, expected)
18
+
19
+ @pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"])
20
+ def test_get_attributes(self, attr):
21
+ arr = SparseArray([0, 1])
22
+ ser = pd.Series(arr)
23
+
24
+ result = getattr(ser.sparse, attr)
25
+ expected = getattr(arr, attr)
26
+ assert result == expected
27
+
28
+ def test_from_coo(self):
29
+ scipy_sparse = pytest.importorskip("scipy.sparse")
30
+
31
+ row = [0, 3, 1, 0]
32
+ col = [0, 3, 1, 2]
33
+ data = [4, 5, 7, 9]
34
+
35
+ sp_array = scipy_sparse.coo_matrix((data, (row, col)))
36
+ result = pd.Series.sparse.from_coo(sp_array)
37
+
38
+ index = pd.MultiIndex.from_arrays(
39
+ [
40
+ np.array([0, 0, 1, 3], dtype=np.int32),
41
+ np.array([0, 2, 1, 3], dtype=np.int32),
42
+ ],
43
+ )
44
+ expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]")
45
+ tm.assert_series_equal(result, expected)
46
+
47
+ @pytest.mark.parametrize(
48
+ "sort_labels, expected_rows, expected_cols, expected_values_pos",
49
+ [
50
+ (
51
+ False,
52
+ [("b", 2), ("a", 2), ("b", 1), ("a", 1)],
53
+ [("z", 1), ("z", 2), ("x", 2), ("z", 0)],
54
+ {1: (1, 0), 3: (3, 3)},
55
+ ),
56
+ (
57
+ True,
58
+ [("a", 1), ("a", 2), ("b", 1), ("b", 2)],
59
+ [("x", 2), ("z", 0), ("z", 1), ("z", 2)],
60
+ {1: (1, 2), 3: (0, 1)},
61
+ ),
62
+ ],
63
+ )
64
+ def test_to_coo(
65
+ self, sort_labels, expected_rows, expected_cols, expected_values_pos
66
+ ):
67
+ sp_sparse = pytest.importorskip("scipy.sparse")
68
+
69
+ values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0)
70
+ index = pd.MultiIndex.from_tuples(
71
+ [
72
+ ("b", 2, "z", 1),
73
+ ("a", 2, "z", 2),
74
+ ("a", 2, "z", 1),
75
+ ("a", 2, "x", 2),
76
+ ("b", 1, "z", 1),
77
+ ("a", 1, "z", 0),
78
+ ]
79
+ )
80
+ ss = pd.Series(values, index=index)
81
+
82
+ expected_A = np.zeros((4, 4))
83
+ for value, (row, col) in expected_values_pos.items():
84
+ expected_A[row, col] = value
85
+
86
+ A, rows, cols = ss.sparse.to_coo(
87
+ row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels
88
+ )
89
+ assert isinstance(A, sp_sparse.coo_matrix)
90
+ tm.assert_numpy_array_equal(A.toarray(), expected_A)
91
+ assert rows == expected_rows
92
+ assert cols == expected_cols
93
+
94
+ def test_non_sparse_raises(self):
95
+ ser = pd.Series([1, 2, 3])
96
+ with pytest.raises(AttributeError, match=".sparse"):
97
+ ser.sparse.density
98
+
99
+
100
+ class TestFrameAccessor:
101
+ def test_accessor_raises(self):
102
+ df = pd.DataFrame({"A": [0, 1]})
103
+ with pytest.raises(AttributeError, match="sparse"):
104
+ df.sparse
105
+
106
+ @pytest.mark.parametrize("format", ["csc", "csr", "coo"])
107
+ @pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])])
108
+ @pytest.mark.parametrize("dtype", ["float64", "int64"])
109
+ def test_from_spmatrix(self, format, labels, dtype):
110
+ sp_sparse = pytest.importorskip("scipy.sparse")
111
+
112
+ sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item())
113
+
114
+ mat = sp_sparse.eye(10, format=format, dtype=dtype)
115
+ result = pd.DataFrame.sparse.from_spmatrix(mat, index=labels, columns=labels)
116
+ expected = pd.DataFrame(
117
+ np.eye(10, dtype=dtype), index=labels, columns=labels
118
+ ).astype(sp_dtype)
119
+ tm.assert_frame_equal(result, expected)
120
+
121
+ @pytest.mark.parametrize("format", ["csc", "csr", "coo"])
122
+ def test_from_spmatrix_including_explicit_zero(self, format):
123
+ sp_sparse = pytest.importorskip("scipy.sparse")
124
+
125
+ mat = sp_sparse.random(10, 2, density=0.5, format=format)
126
+ mat.data[0] = 0
127
+ result = pd.DataFrame.sparse.from_spmatrix(mat)
128
+ dtype = SparseDtype("float64", 0.0)
129
+ expected = pd.DataFrame(mat.todense()).astype(dtype)
130
+ tm.assert_frame_equal(result, expected)
131
+
132
+ @pytest.mark.parametrize(
133
+ "columns",
134
+ [["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]],
135
+ )
136
+ def test_from_spmatrix_columns(self, columns):
137
+ sp_sparse = pytest.importorskip("scipy.sparse")
138
+
139
+ dtype = SparseDtype("float64", 0.0)
140
+
141
+ mat = sp_sparse.random(10, 2, density=0.5)
142
+ result = pd.DataFrame.sparse.from_spmatrix(mat, columns=columns)
143
+ expected = pd.DataFrame(mat.toarray(), columns=columns).astype(dtype)
144
+ tm.assert_frame_equal(result, expected)
145
+
146
+ @pytest.mark.parametrize(
147
+ "colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)]
148
+ )
149
+ def test_to_coo(self, colnames):
150
+ sp_sparse = pytest.importorskip("scipy.sparse")
151
+
152
+ df = pd.DataFrame(
153
+ {colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]"
154
+ )
155
+ result = df.sparse.to_coo()
156
+ expected = sp_sparse.coo_matrix(np.asarray(df))
157
+ assert (result != expected).nnz == 0
158
+
159
+ @pytest.mark.parametrize("fill_value", [1, np.nan])
160
+ def test_to_coo_nonzero_fill_val_raises(self, fill_value):
161
+ pytest.importorskip("scipy")
162
+ df = pd.DataFrame(
163
+ {
164
+ "A": SparseArray(
165
+ [fill_value, fill_value, fill_value, 2], fill_value=fill_value
166
+ ),
167
+ "B": SparseArray(
168
+ [fill_value, 2, fill_value, fill_value], fill_value=fill_value
169
+ ),
170
+ }
171
+ )
172
+ with pytest.raises(ValueError, match="fill value must be 0"):
173
+ df.sparse.to_coo()
174
+
175
+ def test_to_coo_midx_categorical(self):
176
+ # GH#50996
177
+ sp_sparse = pytest.importorskip("scipy.sparse")
178
+
179
+ midx = pd.MultiIndex.from_arrays(
180
+ [
181
+ pd.CategoricalIndex(list("ab"), name="x"),
182
+ pd.CategoricalIndex([0, 1], name="y"),
183
+ ]
184
+ )
185
+
186
+ ser = pd.Series(1, index=midx, dtype="Sparse[int]")
187
+ result = ser.sparse.to_coo(row_levels=["x"], column_levels=["y"])[0]
188
+ expected = sp_sparse.coo_matrix(
189
+ (np.array([1, 1]), (np.array([0, 1]), np.array([0, 1]))), shape=(2, 2)
190
+ )
191
+ assert (result != expected).nnz == 0
192
+
193
+ def test_to_dense(self):
194
+ df = pd.DataFrame(
195
+ {
196
+ "A": SparseArray([1, 0], dtype=SparseDtype("int64", 0)),
197
+ "B": SparseArray([1, 0], dtype=SparseDtype("int64", 1)),
198
+ "C": SparseArray([1.0, 0.0], dtype=SparseDtype("float64", 0.0)),
199
+ },
200
+ index=["b", "a"],
201
+ )
202
+ result = df.sparse.to_dense()
203
+ expected = pd.DataFrame(
204
+ {"A": [1, 0], "B": [1, 0], "C": [1.0, 0.0]}, index=["b", "a"]
205
+ )
206
+ tm.assert_frame_equal(result, expected)
207
+
208
+ def test_density(self):
209
+ df = pd.DataFrame(
210
+ {
211
+ "A": SparseArray([1, 0, 2, 1], fill_value=0),
212
+ "B": SparseArray([0, 1, 1, 1], fill_value=0),
213
+ }
214
+ )
215
+ res = df.sparse.density
216
+ expected = 0.75
217
+ assert res == expected
218
+
219
+ @pytest.mark.parametrize("dtype", ["int64", "float64"])
220
+ @pytest.mark.parametrize("dense_index", [True, False])
221
+ def test_series_from_coo(self, dtype, dense_index):
222
+ sp_sparse = pytest.importorskip("scipy.sparse")
223
+
224
+ A = sp_sparse.eye(3, format="coo", dtype=dtype)
225
+ result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
226
+
227
+ index = pd.MultiIndex.from_tuples(
228
+ [
229
+ np.array([0, 0], dtype=np.int32),
230
+ np.array([1, 1], dtype=np.int32),
231
+ np.array([2, 2], dtype=np.int32),
232
+ ],
233
+ )
234
+ expected = pd.Series(SparseArray(np.array([1, 1, 1], dtype=dtype)), index=index)
235
+ if dense_index:
236
+ expected = expected.reindex(pd.MultiIndex.from_product(index.levels))
237
+
238
+ tm.assert_series_equal(result, expected)
239
+
240
+ def test_series_from_coo_incorrect_format_raises(self):
241
+ # gh-26554
242
+ sp_sparse = pytest.importorskip("scipy.sparse")
243
+
244
+ m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
245
+ with pytest.raises(
246
+ TypeError, match="Expected coo_matrix. Got csr_matrix instead."
247
+ ):
248
+ pd.Series.sparse.from_coo(m)
249
+
250
+ def test_with_column_named_sparse(self):
251
+ # https://github.com/pandas-dev/pandas/issues/30758
252
+ df = pd.DataFrame({"sparse": pd.arrays.SparseArray([1, 2])})
253
+ assert isinstance(df.sparse, pd.core.arrays.sparse.accessor.SparseFrameAccessor)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py ADDED
@@ -0,0 +1,514 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import SparseDtype
8
+ import pandas._testing as tm
9
+ from pandas.core.arrays.sparse import SparseArray
10
+
11
+
12
+ @pytest.fixture(params=["integer", "block"])
13
+ def kind(request):
14
+ """kind kwarg to pass to SparseArray"""
15
+ return request.param
16
+
17
+
18
+ @pytest.fixture(params=[True, False])
19
+ def mix(request):
20
+ """
21
+ Fixture returning True or False, determining whether to operate
22
+ op(sparse, dense) instead of op(sparse, sparse)
23
+ """
24
+ return request.param
25
+
26
+
27
+ class TestSparseArrayArithmetics:
28
+ def _assert(self, a, b):
29
+ # We have to use tm.assert_sp_array_equal. See GH #45126
30
+ tm.assert_numpy_array_equal(a, b)
31
+
32
+ def _check_numeric_ops(self, a, b, a_dense, b_dense, mix: bool, op):
33
+ # Check that arithmetic behavior matches non-Sparse Series arithmetic
34
+
35
+ if isinstance(a_dense, np.ndarray):
36
+ expected = op(pd.Series(a_dense), b_dense).values
37
+ elif isinstance(b_dense, np.ndarray):
38
+ expected = op(a_dense, pd.Series(b_dense)).values
39
+ else:
40
+ raise NotImplementedError
41
+
42
+ with np.errstate(invalid="ignore", divide="ignore"):
43
+ if mix:
44
+ result = op(a, b_dense).to_dense()
45
+ else:
46
+ result = op(a, b).to_dense()
47
+
48
+ self._assert(result, expected)
49
+
50
+ def _check_bool_result(self, res):
51
+ assert isinstance(res, SparseArray)
52
+ assert isinstance(res.dtype, SparseDtype)
53
+ assert res.dtype.subtype == np.bool_
54
+ assert isinstance(res.fill_value, bool)
55
+
56
+ def _check_comparison_ops(self, a, b, a_dense, b_dense):
57
+ with np.errstate(invalid="ignore"):
58
+ # Unfortunately, trying to wrap the computation of each expected
59
+ # value is with np.errstate() is too tedious.
60
+ #
61
+ # sparse & sparse
62
+ self._check_bool_result(a == b)
63
+ self._assert((a == b).to_dense(), a_dense == b_dense)
64
+
65
+ self._check_bool_result(a != b)
66
+ self._assert((a != b).to_dense(), a_dense != b_dense)
67
+
68
+ self._check_bool_result(a >= b)
69
+ self._assert((a >= b).to_dense(), a_dense >= b_dense)
70
+
71
+ self._check_bool_result(a <= b)
72
+ self._assert((a <= b).to_dense(), a_dense <= b_dense)
73
+
74
+ self._check_bool_result(a > b)
75
+ self._assert((a > b).to_dense(), a_dense > b_dense)
76
+
77
+ self._check_bool_result(a < b)
78
+ self._assert((a < b).to_dense(), a_dense < b_dense)
79
+
80
+ # sparse & dense
81
+ self._check_bool_result(a == b_dense)
82
+ self._assert((a == b_dense).to_dense(), a_dense == b_dense)
83
+
84
+ self._check_bool_result(a != b_dense)
85
+ self._assert((a != b_dense).to_dense(), a_dense != b_dense)
86
+
87
+ self._check_bool_result(a >= b_dense)
88
+ self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
89
+
90
+ self._check_bool_result(a <= b_dense)
91
+ self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
92
+
93
+ self._check_bool_result(a > b_dense)
94
+ self._assert((a > b_dense).to_dense(), a_dense > b_dense)
95
+
96
+ self._check_bool_result(a < b_dense)
97
+ self._assert((a < b_dense).to_dense(), a_dense < b_dense)
98
+
99
+ def _check_logical_ops(self, a, b, a_dense, b_dense):
100
+ # sparse & sparse
101
+ self._check_bool_result(a & b)
102
+ self._assert((a & b).to_dense(), a_dense & b_dense)
103
+
104
+ self._check_bool_result(a | b)
105
+ self._assert((a | b).to_dense(), a_dense | b_dense)
106
+ # sparse & dense
107
+ self._check_bool_result(a & b_dense)
108
+ self._assert((a & b_dense).to_dense(), a_dense & b_dense)
109
+
110
+ self._check_bool_result(a | b_dense)
111
+ self._assert((a | b_dense).to_dense(), a_dense | b_dense)
112
+
113
+ @pytest.mark.parametrize("scalar", [0, 1, 3])
114
+ @pytest.mark.parametrize("fill_value", [None, 0, 2])
115
+ def test_float_scalar(
116
+ self, kind, mix, all_arithmetic_functions, fill_value, scalar, request
117
+ ):
118
+ op = all_arithmetic_functions
119
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
120
+ a = SparseArray(values, kind=kind, fill_value=fill_value)
121
+ self._check_numeric_ops(a, scalar, values, scalar, mix, op)
122
+
123
+ def test_float_scalar_comparison(self, kind):
124
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
125
+
126
+ a = SparseArray(values, kind=kind)
127
+ self._check_comparison_ops(a, 1, values, 1)
128
+ self._check_comparison_ops(a, 0, values, 0)
129
+ self._check_comparison_ops(a, 3, values, 3)
130
+
131
+ a = SparseArray(values, kind=kind, fill_value=0)
132
+ self._check_comparison_ops(a, 1, values, 1)
133
+ self._check_comparison_ops(a, 0, values, 0)
134
+ self._check_comparison_ops(a, 3, values, 3)
135
+
136
+ a = SparseArray(values, kind=kind, fill_value=2)
137
+ self._check_comparison_ops(a, 1, values, 1)
138
+ self._check_comparison_ops(a, 0, values, 0)
139
+ self._check_comparison_ops(a, 3, values, 3)
140
+
141
+ def test_float_same_index_without_nans(self, kind, mix, all_arithmetic_functions):
142
+ # when sp_index are the same
143
+ op = all_arithmetic_functions
144
+
145
+ values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
146
+ rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
147
+
148
+ a = SparseArray(values, kind=kind, fill_value=0)
149
+ b = SparseArray(rvalues, kind=kind, fill_value=0)
150
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
151
+
152
+ def test_float_same_index_with_nans(
153
+ self, kind, mix, all_arithmetic_functions, request
154
+ ):
155
+ # when sp_index are the same
156
+ op = all_arithmetic_functions
157
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
158
+ rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
159
+
160
+ a = SparseArray(values, kind=kind)
161
+ b = SparseArray(rvalues, kind=kind)
162
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
163
+
164
+ def test_float_same_index_comparison(self, kind):
165
+ # when sp_index are the same
166
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
167
+ rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
168
+
169
+ a = SparseArray(values, kind=kind)
170
+ b = SparseArray(rvalues, kind=kind)
171
+ self._check_comparison_ops(a, b, values, rvalues)
172
+
173
+ values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
174
+ rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
175
+
176
+ a = SparseArray(values, kind=kind, fill_value=0)
177
+ b = SparseArray(rvalues, kind=kind, fill_value=0)
178
+ self._check_comparison_ops(a, b, values, rvalues)
179
+
180
+ def test_float_array(self, kind, mix, all_arithmetic_functions):
181
+ op = all_arithmetic_functions
182
+
183
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
184
+ rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
185
+
186
+ a = SparseArray(values, kind=kind)
187
+ b = SparseArray(rvalues, kind=kind)
188
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
189
+ self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
190
+
191
+ a = SparseArray(values, kind=kind, fill_value=0)
192
+ b = SparseArray(rvalues, kind=kind)
193
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
194
+
195
+ a = SparseArray(values, kind=kind, fill_value=0)
196
+ b = SparseArray(rvalues, kind=kind, fill_value=0)
197
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
198
+
199
+ a = SparseArray(values, kind=kind, fill_value=1)
200
+ b = SparseArray(rvalues, kind=kind, fill_value=2)
201
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
202
+
203
+ def test_float_array_different_kind(self, mix, all_arithmetic_functions):
204
+ op = all_arithmetic_functions
205
+
206
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
207
+ rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
208
+
209
+ a = SparseArray(values, kind="integer")
210
+ b = SparseArray(rvalues, kind="block")
211
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
212
+ self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
213
+
214
+ a = SparseArray(values, kind="integer", fill_value=0)
215
+ b = SparseArray(rvalues, kind="block")
216
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
217
+
218
+ a = SparseArray(values, kind="integer", fill_value=0)
219
+ b = SparseArray(rvalues, kind="block", fill_value=0)
220
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
221
+
222
+ a = SparseArray(values, kind="integer", fill_value=1)
223
+ b = SparseArray(rvalues, kind="block", fill_value=2)
224
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
225
+
226
+ def test_float_array_comparison(self, kind):
227
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
228
+ rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
229
+
230
+ a = SparseArray(values, kind=kind)
231
+ b = SparseArray(rvalues, kind=kind)
232
+ self._check_comparison_ops(a, b, values, rvalues)
233
+ self._check_comparison_ops(a, b * 0, values, rvalues * 0)
234
+
235
+ a = SparseArray(values, kind=kind, fill_value=0)
236
+ b = SparseArray(rvalues, kind=kind)
237
+ self._check_comparison_ops(a, b, values, rvalues)
238
+
239
+ a = SparseArray(values, kind=kind, fill_value=0)
240
+ b = SparseArray(rvalues, kind=kind, fill_value=0)
241
+ self._check_comparison_ops(a, b, values, rvalues)
242
+
243
+ a = SparseArray(values, kind=kind, fill_value=1)
244
+ b = SparseArray(rvalues, kind=kind, fill_value=2)
245
+ self._check_comparison_ops(a, b, values, rvalues)
246
+
247
+ def test_int_array(self, kind, mix, all_arithmetic_functions):
248
+ op = all_arithmetic_functions
249
+
250
+ # have to specify dtype explicitly until fixing GH 667
251
+ dtype = np.int64
252
+
253
+ values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
254
+ rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
255
+
256
+ a = SparseArray(values, dtype=dtype, kind=kind)
257
+ assert a.dtype == SparseDtype(dtype)
258
+ b = SparseArray(rvalues, dtype=dtype, kind=kind)
259
+ assert b.dtype == SparseDtype(dtype)
260
+
261
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
262
+ self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
263
+
264
+ a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind)
265
+ assert a.dtype == SparseDtype(dtype)
266
+ b = SparseArray(rvalues, dtype=dtype, kind=kind)
267
+ assert b.dtype == SparseDtype(dtype)
268
+
269
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
270
+
271
+ a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind)
272
+ assert a.dtype == SparseDtype(dtype)
273
+ b = SparseArray(rvalues, fill_value=0, dtype=dtype, kind=kind)
274
+ assert b.dtype == SparseDtype(dtype)
275
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
276
+
277
+ a = SparseArray(values, fill_value=1, dtype=dtype, kind=kind)
278
+ assert a.dtype == SparseDtype(dtype, fill_value=1)
279
+ b = SparseArray(rvalues, fill_value=2, dtype=dtype, kind=kind)
280
+ assert b.dtype == SparseDtype(dtype, fill_value=2)
281
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
282
+
283
+ def test_int_array_comparison(self, kind):
284
+ dtype = "int64"
285
+ # int32 NI ATM
286
+
287
+ values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
288
+ rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
289
+
290
+ a = SparseArray(values, dtype=dtype, kind=kind)
291
+ b = SparseArray(rvalues, dtype=dtype, kind=kind)
292
+ self._check_comparison_ops(a, b, values, rvalues)
293
+ self._check_comparison_ops(a, b * 0, values, rvalues * 0)
294
+
295
+ a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0)
296
+ b = SparseArray(rvalues, dtype=dtype, kind=kind)
297
+ self._check_comparison_ops(a, b, values, rvalues)
298
+
299
+ a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0)
300
+ b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=0)
301
+ self._check_comparison_ops(a, b, values, rvalues)
302
+
303
+ a = SparseArray(values, dtype=dtype, kind=kind, fill_value=1)
304
+ b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=2)
305
+ self._check_comparison_ops(a, b, values, rvalues)
306
+
307
+ @pytest.mark.parametrize("fill_value", [True, False, np.nan])
308
+ def test_bool_same_index(self, kind, fill_value):
309
+ # GH 14000
310
+ # when sp_index are the same
311
+ values = np.array([True, False, True, True], dtype=np.bool_)
312
+ rvalues = np.array([True, False, True, True], dtype=np.bool_)
313
+
314
+ a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value)
315
+ b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value)
316
+ self._check_logical_ops(a, b, values, rvalues)
317
+
318
+ @pytest.mark.parametrize("fill_value", [True, False, np.nan])
319
+ def test_bool_array_logical(self, kind, fill_value):
320
+ # GH 14000
321
+ # when sp_index are the same
322
+ values = np.array([True, False, True, False, True, True], dtype=np.bool_)
323
+ rvalues = np.array([True, False, False, True, False, True], dtype=np.bool_)
324
+
325
+ a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value)
326
+ b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value)
327
+ self._check_logical_ops(a, b, values, rvalues)
328
+
329
+ def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request):
330
+ op = all_arithmetic_functions
331
+ rdtype = "int64"
332
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
333
+ rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
334
+
335
+ a = SparseArray(values, kind=kind)
336
+ b = SparseArray(rvalues, kind=kind)
337
+ assert b.dtype == SparseDtype(rdtype)
338
+
339
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
340
+ self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
341
+
342
+ a = SparseArray(values, kind=kind, fill_value=0)
343
+ b = SparseArray(rvalues, kind=kind)
344
+ assert b.dtype == SparseDtype(rdtype)
345
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
346
+
347
+ a = SparseArray(values, kind=kind, fill_value=0)
348
+ b = SparseArray(rvalues, kind=kind, fill_value=0)
349
+ assert b.dtype == SparseDtype(rdtype)
350
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
351
+
352
+ a = SparseArray(values, kind=kind, fill_value=1)
353
+ b = SparseArray(rvalues, kind=kind, fill_value=2)
354
+ assert b.dtype == SparseDtype(rdtype, fill_value=2)
355
+ self._check_numeric_ops(a, b, values, rvalues, mix, op)
356
+
357
+ def test_mixed_array_comparison(self, kind):
358
+ rdtype = "int64"
359
+ # int32 NI ATM
360
+
361
+ values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
362
+ rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
363
+
364
+ a = SparseArray(values, kind=kind)
365
+ b = SparseArray(rvalues, kind=kind)
366
+ assert b.dtype == SparseDtype(rdtype)
367
+
368
+ self._check_comparison_ops(a, b, values, rvalues)
369
+ self._check_comparison_ops(a, b * 0, values, rvalues * 0)
370
+
371
+ a = SparseArray(values, kind=kind, fill_value=0)
372
+ b = SparseArray(rvalues, kind=kind)
373
+ assert b.dtype == SparseDtype(rdtype)
374
+ self._check_comparison_ops(a, b, values, rvalues)
375
+
376
+ a = SparseArray(values, kind=kind, fill_value=0)
377
+ b = SparseArray(rvalues, kind=kind, fill_value=0)
378
+ assert b.dtype == SparseDtype(rdtype)
379
+ self._check_comparison_ops(a, b, values, rvalues)
380
+
381
+ a = SparseArray(values, kind=kind, fill_value=1)
382
+ b = SparseArray(rvalues, kind=kind, fill_value=2)
383
+ assert b.dtype == SparseDtype(rdtype, fill_value=2)
384
+ self._check_comparison_ops(a, b, values, rvalues)
385
+
386
+ def test_xor(self):
387
+ s = SparseArray([True, True, False, False])
388
+ t = SparseArray([True, False, True, False])
389
+ result = s ^ t
390
+ sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32"))
391
+ expected = SparseArray([False, True, True], sparse_index=sp_index)
392
+ tm.assert_sp_array_equal(result, expected)
393
+
394
+
395
+ @pytest.mark.parametrize("op", [operator.eq, operator.add])
396
+ def test_with_list(op):
397
+ arr = SparseArray([0, 1], fill_value=0)
398
+ result = op(arr, [0, 1])
399
+ expected = op(arr, SparseArray([0, 1]))
400
+ tm.assert_sp_array_equal(result, expected)
401
+
402
+
403
+ def test_with_dataframe():
404
+ # GH#27910
405
+ arr = SparseArray([0, 1], fill_value=0)
406
+ df = pd.DataFrame([[1, 2], [3, 4]])
407
+ result = arr.__add__(df)
408
+ assert result is NotImplemented
409
+
410
+
411
+ def test_with_zerodim_ndarray():
412
+ # GH#27910
413
+ arr = SparseArray([0, 1], fill_value=0)
414
+
415
+ result = arr * np.array(2)
416
+ expected = arr * 2
417
+ tm.assert_sp_array_equal(result, expected)
418
+
419
+
420
+ @pytest.mark.parametrize("ufunc", [np.abs, np.exp])
421
+ @pytest.mark.parametrize(
422
+ "arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])]
423
+ )
424
+ def test_ufuncs(ufunc, arr):
425
+ result = ufunc(arr)
426
+ fill_value = ufunc(arr.fill_value)
427
+ expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value)
428
+ tm.assert_sp_array_equal(result, expected)
429
+
430
+
431
+ @pytest.mark.parametrize(
432
+ "a, b",
433
+ [
434
+ (SparseArray([0, 0, 0]), np.array([0, 1, 2])),
435
+ (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
436
+ (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
437
+ (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
438
+ (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
439
+ ],
440
+ )
441
+ @pytest.mark.parametrize("ufunc", [np.add, np.greater])
442
+ def test_binary_ufuncs(ufunc, a, b):
443
+ # can't say anything about fill value here.
444
+ result = ufunc(a, b)
445
+ expected = ufunc(np.asarray(a), np.asarray(b))
446
+ assert isinstance(result, SparseArray)
447
+ tm.assert_numpy_array_equal(np.asarray(result), expected)
448
+
449
+
450
+ def test_ndarray_inplace():
451
+ sparray = SparseArray([0, 2, 0, 0])
452
+ ndarray = np.array([0, 1, 2, 3])
453
+ ndarray += sparray
454
+ expected = np.array([0, 3, 2, 3])
455
+ tm.assert_numpy_array_equal(ndarray, expected)
456
+
457
+
458
+ def test_sparray_inplace():
459
+ sparray = SparseArray([0, 2, 0, 0])
460
+ ndarray = np.array([0, 1, 2, 3])
461
+ sparray += ndarray
462
+ expected = SparseArray([0, 3, 2, 3], fill_value=0)
463
+ tm.assert_sp_array_equal(sparray, expected)
464
+
465
+
466
+ @pytest.mark.parametrize("cons", [list, np.array, SparseArray])
467
+ def test_mismatched_length_cmp_op(cons):
468
+ left = SparseArray([True, True])
469
+ right = cons([True, True, True])
470
+ with pytest.raises(ValueError, match="operands have mismatched length"):
471
+ left & right
472
+
473
+
474
+ @pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"])
475
+ @pytest.mark.parametrize("fill_value", [np.nan, 3])
476
+ def test_binary_operators(op, fill_value):
477
+ op = getattr(operator, op)
478
+ data1 = np.random.default_rng(2).standard_normal(20)
479
+ data2 = np.random.default_rng(2).standard_normal(20)
480
+
481
+ data1[::2] = fill_value
482
+ data2[::3] = fill_value
483
+
484
+ first = SparseArray(data1, fill_value=fill_value)
485
+ second = SparseArray(data2, fill_value=fill_value)
486
+
487
+ with np.errstate(all="ignore"):
488
+ res = op(first, second)
489
+ exp = SparseArray(
490
+ op(first.to_dense(), second.to_dense()), fill_value=first.fill_value
491
+ )
492
+ assert isinstance(res, SparseArray)
493
+ tm.assert_almost_equal(res.to_dense(), exp.to_dense())
494
+
495
+ res2 = op(first, second.to_dense())
496
+ assert isinstance(res2, SparseArray)
497
+ tm.assert_sp_array_equal(res, res2)
498
+
499
+ res3 = op(first.to_dense(), second)
500
+ assert isinstance(res3, SparseArray)
501
+ tm.assert_sp_array_equal(res, res3)
502
+
503
+ res4 = op(first, 4)
504
+ assert isinstance(res4, SparseArray)
505
+
506
+ # Ignore this if the actual op raises (e.g. pow).
507
+ try:
508
+ exp = op(first.to_dense(), 4)
509
+ exp_fv = op(first.fill_value, 4)
510
+ except ValueError:
511
+ pass
512
+ else:
513
+ tm.assert_almost_equal(res4.fill_value, exp_fv)
514
+ tm.assert_almost_equal(res4.to_dense(), exp)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py ADDED
@@ -0,0 +1,480 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas._libs.sparse import IntIndex
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ SparseDtype,
11
+ isna,
12
+ )
13
+ import pandas._testing as tm
14
+ from pandas.core.arrays.sparse import SparseArray
15
+
16
+
17
+ @pytest.fixture
18
+ def arr_data():
19
+ """Fixture returning numpy array with valid and missing entries"""
20
+ return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
21
+
22
+
23
+ @pytest.fixture
24
+ def arr(arr_data):
25
+ """Fixture returning SparseArray from 'arr_data'"""
26
+ return SparseArray(arr_data)
27
+
28
+
29
+ @pytest.fixture
30
+ def zarr():
31
+ """Fixture returning SparseArray with integer entries and 'fill_value=0'"""
32
+ return SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
33
+
34
+
35
+ class TestSparseArray:
36
+ @pytest.mark.parametrize("fill_value", [0, None, np.nan])
37
+ def test_shift_fill_value(self, fill_value):
38
+ # GH #24128
39
+ sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0)
40
+ res = sparse.shift(1, fill_value=fill_value)
41
+ if isna(fill_value):
42
+ fill_value = res.dtype.na_value
43
+ exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0)
44
+ tm.assert_sp_array_equal(res, exp)
45
+
46
+ def test_set_fill_value(self):
47
+ arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan)
48
+ arr.fill_value = 2
49
+ assert arr.fill_value == 2
50
+
51
+ arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
52
+ arr.fill_value = 2
53
+ assert arr.fill_value == 2
54
+
55
+ msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated"
56
+ with tm.assert_produces_warning(FutureWarning, match=msg):
57
+ arr.fill_value = 3.1
58
+ assert arr.fill_value == 3.1
59
+
60
+ arr.fill_value = np.nan
61
+ assert np.isnan(arr.fill_value)
62
+
63
+ arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_)
64
+ arr.fill_value = True
65
+ assert arr.fill_value is True
66
+
67
+ with tm.assert_produces_warning(FutureWarning, match=msg):
68
+ arr.fill_value = 0
69
+
70
+ arr.fill_value = np.nan
71
+ assert np.isnan(arr.fill_value)
72
+
73
+ @pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
74
+ def test_set_fill_invalid_non_scalar(self, val):
75
+ arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_)
76
+ msg = "fill_value must be a scalar"
77
+
78
+ with pytest.raises(ValueError, match=msg):
79
+ arr.fill_value = val
80
+
81
+ def test_copy(self, arr):
82
+ arr2 = arr.copy()
83
+ assert arr2.sp_values is not arr.sp_values
84
+ assert arr2.sp_index is arr.sp_index
85
+
86
+ def test_values_asarray(self, arr_data, arr):
87
+ tm.assert_almost_equal(arr.to_dense(), arr_data)
88
+
89
+ @pytest.mark.parametrize(
90
+ "data,shape,dtype",
91
+ [
92
+ ([0, 0, 0, 0, 0], (5,), None),
93
+ ([], (0,), None),
94
+ ([0], (1,), None),
95
+ (["A", "A", np.nan, "B"], (4,), object),
96
+ ],
97
+ )
98
+ def test_shape(self, data, shape, dtype):
99
+ # GH 21126
100
+ out = SparseArray(data, dtype=dtype)
101
+ assert out.shape == shape
102
+
103
+ @pytest.mark.parametrize(
104
+ "vals",
105
+ [
106
+ [np.nan, np.nan, np.nan, np.nan, np.nan],
107
+ [1, np.nan, np.nan, 3, np.nan],
108
+ [1, np.nan, 0, 3, 0],
109
+ ],
110
+ )
111
+ @pytest.mark.parametrize("fill_value", [None, 0])
112
+ def test_dense_repr(self, vals, fill_value):
113
+ vals = np.array(vals)
114
+ arr = SparseArray(vals, fill_value=fill_value)
115
+
116
+ res = arr.to_dense()
117
+ tm.assert_numpy_array_equal(res, vals)
118
+
119
+ @pytest.mark.parametrize("fix", ["arr", "zarr"])
120
+ def test_pickle(self, fix, request):
121
+ obj = request.getfixturevalue(fix)
122
+ unpickled = tm.round_trip_pickle(obj)
123
+ tm.assert_sp_array_equal(unpickled, obj)
124
+
125
+ def test_generator_warnings(self):
126
+ sp_arr = SparseArray([1, 2, 3])
127
+ with tm.assert_produces_warning(None):
128
+ for _ in sp_arr:
129
+ pass
130
+
131
+ def test_where_retain_fill_value(self):
132
+ # GH#45691 don't lose fill_value on _where
133
+ arr = SparseArray([np.nan, 1.0], fill_value=0)
134
+
135
+ mask = np.array([True, False])
136
+
137
+ res = arr._where(~mask, 1)
138
+ exp = SparseArray([1, 1.0], fill_value=0)
139
+ tm.assert_sp_array_equal(res, exp)
140
+
141
+ ser = pd.Series(arr)
142
+ res = ser.where(~mask, 1)
143
+ tm.assert_series_equal(res, pd.Series(exp))
144
+
145
+ def test_fillna(self):
146
+ s = SparseArray([1, np.nan, np.nan, 3, np.nan])
147
+ res = s.fillna(-1)
148
+ exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
149
+ tm.assert_sp_array_equal(res, exp)
150
+
151
+ s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
152
+ res = s.fillna(-1)
153
+ exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
154
+ tm.assert_sp_array_equal(res, exp)
155
+
156
+ s = SparseArray([1, np.nan, 0, 3, 0])
157
+ res = s.fillna(-1)
158
+ exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
159
+ tm.assert_sp_array_equal(res, exp)
160
+
161
+ s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
162
+ res = s.fillna(-1)
163
+ exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
164
+ tm.assert_sp_array_equal(res, exp)
165
+
166
+ s = SparseArray([np.nan, np.nan, np.nan, np.nan])
167
+ res = s.fillna(-1)
168
+ exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
169
+ tm.assert_sp_array_equal(res, exp)
170
+
171
+ s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
172
+ res = s.fillna(-1)
173
+ exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
174
+ tm.assert_sp_array_equal(res, exp)
175
+
176
+ # float dtype's fill_value is np.nan, replaced by -1
177
+ s = SparseArray([0.0, 0.0, 0.0, 0.0])
178
+ res = s.fillna(-1)
179
+ exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1)
180
+ tm.assert_sp_array_equal(res, exp)
181
+
182
+ # int dtype shouldn't have missing. No changes.
183
+ s = SparseArray([0, 0, 0, 0])
184
+ assert s.dtype == SparseDtype(np.int64)
185
+ assert s.fill_value == 0
186
+ res = s.fillna(-1)
187
+ tm.assert_sp_array_equal(res, s)
188
+
189
+ s = SparseArray([0, 0, 0, 0], fill_value=0)
190
+ assert s.dtype == SparseDtype(np.int64)
191
+ assert s.fill_value == 0
192
+ res = s.fillna(-1)
193
+ exp = SparseArray([0, 0, 0, 0], fill_value=0)
194
+ tm.assert_sp_array_equal(res, exp)
195
+
196
+ # fill_value can be nan if there is no missing hole.
197
+ # only fill_value will be changed
198
+ s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
199
+ assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
200
+ assert np.isnan(s.fill_value)
201
+ res = s.fillna(-1)
202
+ exp = SparseArray([0, 0, 0, 0], fill_value=-1)
203
+ tm.assert_sp_array_equal(res, exp)
204
+
205
+ def test_fillna_overlap(self):
206
+ s = SparseArray([1, np.nan, np.nan, 3, np.nan])
207
+ # filling with existing value doesn't replace existing value with
208
+ # fill_value, i.e. existing 3 remains in sp_values
209
+ res = s.fillna(3)
210
+ exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
211
+ tm.assert_numpy_array_equal(res.to_dense(), exp)
212
+
213
+ s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
214
+ res = s.fillna(3)
215
+ exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
216
+ tm.assert_sp_array_equal(res, exp)
217
+
218
+ def test_nonzero(self):
219
+ # Tests regression #21172.
220
+ sa = SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
221
+ expected = np.array([2, 5, 9], dtype=np.int32)
222
+ (result,) = sa.nonzero()
223
+ tm.assert_numpy_array_equal(expected, result)
224
+
225
+ sa = SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
226
+ (result,) = sa.nonzero()
227
+ tm.assert_numpy_array_equal(expected, result)
228
+
229
+
230
+ class TestSparseArrayAnalytics:
231
+ @pytest.mark.parametrize(
232
+ "data,expected",
233
+ [
234
+ (
235
+ np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
236
+ SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])),
237
+ ),
238
+ (
239
+ np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
240
+ SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])),
241
+ ),
242
+ ],
243
+ )
244
+ @pytest.mark.parametrize("numpy", [True, False])
245
+ def test_cumsum(self, data, expected, numpy):
246
+ cumsum = np.cumsum if numpy else lambda s: s.cumsum()
247
+
248
+ out = cumsum(SparseArray(data))
249
+ tm.assert_sp_array_equal(out, expected)
250
+
251
+ out = cumsum(SparseArray(data, fill_value=np.nan))
252
+ tm.assert_sp_array_equal(out, expected)
253
+
254
+ out = cumsum(SparseArray(data, fill_value=2))
255
+ tm.assert_sp_array_equal(out, expected)
256
+
257
+ if numpy: # numpy compatibility checks.
258
+ msg = "the 'dtype' parameter is not supported"
259
+ with pytest.raises(ValueError, match=msg):
260
+ np.cumsum(SparseArray(data), dtype=np.int64)
261
+
262
+ msg = "the 'out' parameter is not supported"
263
+ with pytest.raises(ValueError, match=msg):
264
+ np.cumsum(SparseArray(data), out=out)
265
+ else:
266
+ axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
267
+ msg = re.escape(f"axis(={axis}) out of bounds")
268
+ with pytest.raises(ValueError, match=msg):
269
+ SparseArray(data).cumsum(axis=axis)
270
+
271
+ def test_ufunc(self):
272
+ # GH 13853 make sure ufunc is applied to fill_value
273
+ sparse = SparseArray([1, np.nan, 2, np.nan, -2])
274
+ result = SparseArray([1, np.nan, 2, np.nan, 2])
275
+ tm.assert_sp_array_equal(abs(sparse), result)
276
+ tm.assert_sp_array_equal(np.abs(sparse), result)
277
+
278
+ sparse = SparseArray([1, -1, 2, -2], fill_value=1)
279
+ result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)
280
+ tm.assert_sp_array_equal(abs(sparse), result)
281
+ tm.assert_sp_array_equal(np.abs(sparse), result)
282
+
283
+ sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
284
+ exp = SparseArray([1, 1, 2, 2], fill_value=1)
285
+ tm.assert_sp_array_equal(abs(sparse), exp)
286
+ tm.assert_sp_array_equal(np.abs(sparse), exp)
287
+
288
+ sparse = SparseArray([1, np.nan, 2, np.nan, -2])
289
+ result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
290
+ tm.assert_sp_array_equal(np.sin(sparse), result)
291
+
292
+ sparse = SparseArray([1, -1, 2, -2], fill_value=1)
293
+ result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
294
+ tm.assert_sp_array_equal(np.sin(sparse), result)
295
+
296
+ sparse = SparseArray([1, -1, 0, -2], fill_value=0)
297
+ result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
298
+ tm.assert_sp_array_equal(np.sin(sparse), result)
299
+
300
+ def test_ufunc_args(self):
301
+ # GH 13853 make sure ufunc is applied to fill_value, including its arg
302
+ sparse = SparseArray([1, np.nan, 2, np.nan, -2])
303
+ result = SparseArray([2, np.nan, 3, np.nan, -1])
304
+ tm.assert_sp_array_equal(np.add(sparse, 1), result)
305
+
306
+ sparse = SparseArray([1, -1, 2, -2], fill_value=1)
307
+ result = SparseArray([2, 0, 3, -1], fill_value=2)
308
+ tm.assert_sp_array_equal(np.add(sparse, 1), result)
309
+
310
+ sparse = SparseArray([1, -1, 0, -2], fill_value=0)
311
+ result = SparseArray([2, 0, 1, -1], fill_value=1)
312
+ tm.assert_sp_array_equal(np.add(sparse, 1), result)
313
+
314
+ @pytest.mark.parametrize("fill_value", [0.0, np.nan])
315
+ def test_modf(self, fill_value):
316
+ # https://github.com/pandas-dev/pandas/issues/26946
317
+ sparse = SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value)
318
+ r1, r2 = np.modf(sparse)
319
+ e1, e2 = np.modf(np.asarray(sparse))
320
+ tm.assert_sp_array_equal(r1, SparseArray(e1, fill_value=fill_value))
321
+ tm.assert_sp_array_equal(r2, SparseArray(e2, fill_value=fill_value))
322
+
323
+ def test_nbytes_integer(self):
324
+ arr = SparseArray([1, 0, 0, 0, 2], kind="integer")
325
+ result = arr.nbytes
326
+ # (2 * 8) + 2 * 4
327
+ assert result == 24
328
+
329
+ def test_nbytes_block(self):
330
+ arr = SparseArray([1, 2, 0, 0, 0], kind="block")
331
+ result = arr.nbytes
332
+ # (2 * 8) + 4 + 4
333
+ # sp_values, blocs, blengths
334
+ assert result == 24
335
+
336
+ def test_asarray_datetime64(self):
337
+ s = SparseArray(pd.to_datetime(["2012", None, None, "2013"]))
338
+ np.asarray(s)
339
+
340
+ def test_density(self):
341
+ arr = SparseArray([0, 1])
342
+ assert arr.density == 0.5
343
+
344
+ def test_npoints(self):
345
+ arr = SparseArray([0, 1])
346
+ assert arr.npoints == 1
347
+
348
+
349
+ def test_setting_fill_value_fillna_still_works():
350
+ # This is why letting users update fill_value / dtype is bad
351
+ # astype has the same problem.
352
+ arr = SparseArray([1.0, np.nan, 1.0], fill_value=0.0)
353
+ arr.fill_value = np.nan
354
+ result = arr.isna()
355
+ # Can't do direct comparison, since the sp_index will be different
356
+ # So let's convert to ndarray and check there.
357
+ result = np.asarray(result)
358
+
359
+ expected = np.array([False, True, False])
360
+ tm.assert_numpy_array_equal(result, expected)
361
+
362
+
363
+ def test_setting_fill_value_updates():
364
+ arr = SparseArray([0.0, np.nan], fill_value=0)
365
+ arr.fill_value = np.nan
366
+ # use private constructor to get the index right
367
+ # otherwise both nans would be un-stored.
368
+ expected = SparseArray._simple_new(
369
+ sparse_array=np.array([np.nan]),
370
+ sparse_index=IntIndex(2, [1]),
371
+ dtype=SparseDtype(float, np.nan),
372
+ )
373
+ tm.assert_sp_array_equal(arr, expected)
374
+
375
+
376
+ @pytest.mark.parametrize(
377
+ "arr,fill_value,loc",
378
+ [
379
+ ([None, 1, 2], None, 0),
380
+ ([0, None, 2], None, 1),
381
+ ([0, 1, None], None, 2),
382
+ ([0, 1, 1, None, None], None, 3),
383
+ ([1, 1, 1, 2], None, -1),
384
+ ([], None, -1),
385
+ ([None, 1, 0, 0, None, 2], None, 0),
386
+ ([None, 1, 0, 0, None, 2], 1, 1),
387
+ ([None, 1, 0, 0, None, 2], 2, 5),
388
+ ([None, 1, 0, 0, None, 2], 3, -1),
389
+ ([None, 0, 0, 1, 2, 1], 0, 1),
390
+ ([None, 0, 0, 1, 2, 1], 1, 3),
391
+ ],
392
+ )
393
+ def test_first_fill_value_loc(arr, fill_value, loc):
394
+ result = SparseArray(arr, fill_value=fill_value)._first_fill_value_loc()
395
+ assert result == loc
396
+
397
+
398
+ @pytest.mark.parametrize(
399
+ "arr",
400
+ [
401
+ [1, 2, np.nan, np.nan],
402
+ [1, np.nan, 2, np.nan],
403
+ [1, 2, np.nan],
404
+ [np.nan, 1, 0, 0, np.nan, 2],
405
+ [np.nan, 0, 0, 1, 2, 1],
406
+ ],
407
+ )
408
+ @pytest.mark.parametrize("fill_value", [np.nan, 0, 1])
409
+ def test_unique_na_fill(arr, fill_value):
410
+ a = SparseArray(arr, fill_value=fill_value).unique()
411
+ b = pd.Series(arr).unique()
412
+ assert isinstance(a, SparseArray)
413
+ a = np.asarray(a)
414
+ tm.assert_numpy_array_equal(a, b)
415
+
416
+
417
+ def test_unique_all_sparse():
418
+ # https://github.com/pandas-dev/pandas/issues/23168
419
+ arr = SparseArray([0, 0])
420
+ result = arr.unique()
421
+ expected = SparseArray([0])
422
+ tm.assert_sp_array_equal(result, expected)
423
+
424
+
425
+ def test_map():
426
+ arr = SparseArray([0, 1, 2])
427
+ expected = SparseArray([10, 11, 12], fill_value=10)
428
+
429
+ # dict
430
+ result = arr.map({0: 10, 1: 11, 2: 12})
431
+ tm.assert_sp_array_equal(result, expected)
432
+
433
+ # series
434
+ result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
435
+ tm.assert_sp_array_equal(result, expected)
436
+
437
+ # function
438
+ result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
439
+ expected = SparseArray([10, 11, 12], fill_value=10)
440
+ tm.assert_sp_array_equal(result, expected)
441
+
442
+
443
+ def test_map_missing():
444
+ arr = SparseArray([0, 1, 2])
445
+ expected = SparseArray([10, 11, None], fill_value=10)
446
+
447
+ result = arr.map({0: 10, 1: 11})
448
+ tm.assert_sp_array_equal(result, expected)
449
+
450
+
451
+ @pytest.mark.parametrize("fill_value", [np.nan, 1])
452
+ def test_dropna(fill_value):
453
+ # GH-28287
454
+ arr = SparseArray([np.nan, 1], fill_value=fill_value)
455
+ exp = SparseArray([1.0], fill_value=fill_value)
456
+ tm.assert_sp_array_equal(arr.dropna(), exp)
457
+
458
+ df = pd.DataFrame({"a": [0, 1], "b": arr})
459
+ expected_df = pd.DataFrame({"a": [1], "b": exp}, index=pd.Index([1]))
460
+ tm.assert_equal(df.dropna(), expected_df)
461
+
462
+
463
+ def test_drop_duplicates_fill_value():
464
+ # GH 11726
465
+ df = pd.DataFrame(np.zeros((5, 5))).apply(lambda x: SparseArray(x, fill_value=0))
466
+ result = df.drop_duplicates()
467
+ expected = pd.DataFrame({i: SparseArray([0.0], fill_value=0) for i in range(5)})
468
+ tm.assert_frame_equal(result, expected)
469
+
470
+
471
+ def test_zero_sparse_column():
472
+ # GH 27781
473
+ df1 = pd.DataFrame({"A": SparseArray([0, 0, 0]), "B": [1, 2, 3]})
474
+ df2 = pd.DataFrame({"A": SparseArray([0, 1, 0]), "B": [1, 2, 3]})
475
+ result = df1.loc[df1["B"] != 2]
476
+ expected = df2.loc[df2["B"] != 2]
477
+ tm.assert_frame_equal(result, expected)
478
+
479
+ expected = pd.DataFrame({"A": SparseArray([0, 0]), "B": [1, 3]}, index=[0, 2])
480
+ tm.assert_frame_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs.sparse import IntIndex
5
+
6
+ from pandas import (
7
+ SparseDtype,
8
+ Timestamp,
9
+ )
10
+ import pandas._testing as tm
11
+ from pandas.core.arrays.sparse import SparseArray
12
+
13
+
14
+ class TestAstype:
15
+ def test_astype(self):
16
+ # float -> float
17
+ arr = SparseArray([None, None, 0, 2])
18
+ result = arr.astype("Sparse[float32]")
19
+ expected = SparseArray([None, None, 0, 2], dtype=np.dtype("float32"))
20
+ tm.assert_sp_array_equal(result, expected)
21
+
22
+ dtype = SparseDtype("float64", fill_value=0)
23
+ result = arr.astype(dtype)
24
+ expected = SparseArray._simple_new(
25
+ np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype
26
+ )
27
+ tm.assert_sp_array_equal(result, expected)
28
+
29
+ dtype = SparseDtype("int64", 0)
30
+ result = arr.astype(dtype)
31
+ expected = SparseArray._simple_new(
32
+ np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype
33
+ )
34
+ tm.assert_sp_array_equal(result, expected)
35
+
36
+ arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
37
+ with pytest.raises(ValueError, match="NA"):
38
+ arr.astype("Sparse[i8]")
39
+
40
+ def test_astype_bool(self):
41
+ a = SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
42
+ result = a.astype(bool)
43
+ expected = np.array([1, 0, 0, 1], dtype=bool)
44
+ tm.assert_numpy_array_equal(result, expected)
45
+
46
+ # update fill value
47
+ result = a.astype(SparseDtype(bool, False))
48
+ expected = SparseArray(
49
+ [True, False, False, True], dtype=SparseDtype(bool, False)
50
+ )
51
+ tm.assert_sp_array_equal(result, expected)
52
+
53
+ def test_astype_all(self, any_real_numpy_dtype):
54
+ vals = np.array([1, 2, 3])
55
+ arr = SparseArray(vals, fill_value=1)
56
+ typ = np.dtype(any_real_numpy_dtype)
57
+ res = arr.astype(typ)
58
+ tm.assert_numpy_array_equal(res, vals.astype(any_real_numpy_dtype))
59
+
60
+ @pytest.mark.parametrize(
61
+ "arr, dtype, expected",
62
+ [
63
+ (
64
+ SparseArray([0, 1]),
65
+ "float",
66
+ SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)),
67
+ ),
68
+ (SparseArray([0, 1]), bool, SparseArray([False, True])),
69
+ (
70
+ SparseArray([0, 1], fill_value=1),
71
+ bool,
72
+ SparseArray([False, True], dtype=SparseDtype(bool, True)),
73
+ ),
74
+ pytest.param(
75
+ SparseArray([0, 1]),
76
+ "datetime64[ns]",
77
+ SparseArray(
78
+ np.array([0, 1], dtype="datetime64[ns]"),
79
+ dtype=SparseDtype("datetime64[ns]", Timestamp("1970")),
80
+ ),
81
+ ),
82
+ (
83
+ SparseArray([0, 1, 10]),
84
+ str,
85
+ SparseArray(["0", "1", "10"], dtype=SparseDtype(str, "0")),
86
+ ),
87
+ (SparseArray(["10", "20"]), float, SparseArray([10.0, 20.0])),
88
+ (
89
+ SparseArray([0, 1, 0]),
90
+ object,
91
+ SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)),
92
+ ),
93
+ ],
94
+ )
95
+ def test_astype_more(self, arr, dtype, expected):
96
+ result = arr.astype(arr.dtype.update_dtype(dtype))
97
+ tm.assert_sp_array_equal(result, expected)
98
+
99
+ def test_astype_nan_raises(self):
100
+ arr = SparseArray([1.0, np.nan])
101
+ with pytest.raises(ValueError, match="Cannot convert non-finite"):
102
+ arr.astype(int)
103
+
104
+ def test_astype_copy_false(self):
105
+ # GH#34456 bug caused by using .view instead of .astype in astype_nansafe
106
+ arr = SparseArray([1, 2, 3])
107
+
108
+ dtype = SparseDtype(float, 0)
109
+
110
+ result = arr.astype(dtype, copy=False)
111
+ expected = SparseArray([1.0, 2.0, 3.0], fill_value=0.0)
112
+ tm.assert_sp_array_equal(result, expected)
113
+
114
+ def test_astype_dt64_to_int64(self):
115
+ # GH#49631 match non-sparse behavior
116
+ values = np.array(["NaT", "2016-01-02", "2016-01-03"], dtype="M8[ns]")
117
+
118
+ arr = SparseArray(values)
119
+ result = arr.astype("int64")
120
+ expected = values.astype("int64")
121
+ tm.assert_numpy_array_equal(result, expected)
122
+
123
+ # we should also be able to cast to equivalent Sparse[int64]
124
+ dtype_int64 = SparseDtype("int64", np.iinfo(np.int64).min)
125
+ result2 = arr.astype(dtype_int64)
126
+ tm.assert_numpy_array_equal(result2.to_numpy(), expected)
127
+
128
+ # GH#50087 we should match the non-sparse behavior regardless of
129
+ # if we have a fill_value other than NaT
130
+ dtype = SparseDtype("datetime64[ns]", values[1])
131
+ arr3 = SparseArray(values, dtype=dtype)
132
+ result3 = arr3.astype("int64")
133
+ tm.assert_numpy_array_equal(result3, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ import pandas._testing as tm
6
+ from pandas.core.arrays.sparse import SparseArray
7
+
8
+
9
+ class TestSparseArrayConcat:
10
+ @pytest.mark.parametrize("kind", ["integer", "block"])
11
+ def test_basic(self, kind):
12
+ a = SparseArray([1, 0, 0, 2], kind=kind)
13
+ b = SparseArray([1, 0, 2, 2], kind=kind)
14
+
15
+ result = SparseArray._concat_same_type([a, b])
16
+ # Can't make any assertions about the sparse index itself
17
+ # since we aren't don't merge sparse blocs across arrays
18
+ # in to_concat
19
+ expected = np.array([1, 2, 1, 2, 2], dtype="int64")
20
+ tm.assert_numpy_array_equal(result.sp_values, expected)
21
+ assert result.kind == kind
22
+
23
+ @pytest.mark.parametrize("kind", ["integer", "block"])
24
+ def test_uses_first_kind(self, kind):
25
+ other = "integer" if kind == "block" else "block"
26
+ a = SparseArray([1, 0, 0, 2], kind=kind)
27
+ b = SparseArray([1, 0, 2, 2], kind=other)
28
+
29
+ result = SparseArray._concat_same_type([a, b])
30
+ expected = np.array([1, 2, 1, 2, 2], dtype="int64")
31
+ tm.assert_numpy_array_equal(result.sp_values, expected)
32
+ assert result.kind == kind
33
+
34
+
35
+ @pytest.mark.parametrize(
36
+ "other, expected_dtype",
37
+ [
38
+ # compatible dtype -> preserve sparse
39
+ (pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)),
40
+ # (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)),
41
+ # incompatible dtype -> Sparse[common dtype]
42
+ (pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)),
43
+ # incompatible dtype -> Sparse[object] dtype
44
+ (pd.Series(["a", "b", "c"], dtype=object), pd.SparseDtype(object, 0)),
45
+ # categorical with compatible categories -> dtype of the categories
46
+ (pd.Series([3, 4, 5], dtype="category"), np.dtype("int64")),
47
+ (pd.Series([1.5, 2.5, 3.5], dtype="category"), np.dtype("float64")),
48
+ # categorical with incompatible categories -> object dtype
49
+ (pd.Series(["a", "b", "c"], dtype="category"), np.dtype(object)),
50
+ ],
51
+ )
52
+ def test_concat_with_non_sparse(other, expected_dtype):
53
+ # https://github.com/pandas-dev/pandas/issues/34336
54
+ s_sparse = pd.Series([1, 0, 2], dtype=pd.SparseDtype("int64", 0))
55
+
56
+ result = pd.concat([s_sparse, other], ignore_index=True)
57
+ expected = pd.Series(list(s_sparse) + list(other)).astype(expected_dtype)
58
+ tm.assert_series_equal(result, expected)
59
+
60
+ result = pd.concat([other, s_sparse], ignore_index=True)
61
+ expected = pd.Series(list(other) + list(s_sparse)).astype(expected_dtype)
62
+ tm.assert_series_equal(result, expected)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs.sparse import IntIndex
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ SparseDtype,
9
+ isna,
10
+ )
11
+ import pandas._testing as tm
12
+ from pandas.core.arrays.sparse import SparseArray
13
+
14
+
15
+ class TestConstructors:
16
+ def test_constructor_dtype(self):
17
+ arr = SparseArray([np.nan, 1, 2, np.nan])
18
+ assert arr.dtype == SparseDtype(np.float64, np.nan)
19
+ assert arr.dtype.subtype == np.float64
20
+ assert np.isnan(arr.fill_value)
21
+
22
+ arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
23
+ assert arr.dtype == SparseDtype(np.float64, 0)
24
+ assert arr.fill_value == 0
25
+
26
+ arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
27
+ assert arr.dtype == SparseDtype(np.float64, np.nan)
28
+ assert np.isnan(arr.fill_value)
29
+
30
+ arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
31
+ assert arr.dtype == SparseDtype(np.int64, 0)
32
+ assert arr.fill_value == 0
33
+
34
+ arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
35
+ assert arr.dtype == SparseDtype(np.int64, 0)
36
+ assert arr.fill_value == 0
37
+
38
+ arr = SparseArray([0, 1, 2, 4], dtype=None)
39
+ assert arr.dtype == SparseDtype(np.int64, 0)
40
+ assert arr.fill_value == 0
41
+
42
+ arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
43
+ assert arr.dtype == SparseDtype(np.int64, 0)
44
+ assert arr.fill_value == 0
45
+
46
+ def test_constructor_dtype_str(self):
47
+ result = SparseArray([1, 2, 3], dtype="int")
48
+ expected = SparseArray([1, 2, 3], dtype=int)
49
+ tm.assert_sp_array_equal(result, expected)
50
+
51
+ def test_constructor_sparse_dtype(self):
52
+ result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1))
53
+ expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
54
+ tm.assert_sp_array_equal(result, expected)
55
+ assert result.sp_values.dtype == np.dtype("int64")
56
+
57
+ def test_constructor_sparse_dtype_str(self):
58
+ result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]")
59
+ expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
60
+ tm.assert_sp_array_equal(result, expected)
61
+ assert result.sp_values.dtype == np.dtype("int32")
62
+
63
+ def test_constructor_object_dtype(self):
64
+ # GH#11856
65
+ arr = SparseArray(["A", "A", np.nan, "B"], dtype=object)
66
+ assert arr.dtype == SparseDtype(object)
67
+ assert np.isnan(arr.fill_value)
68
+
69
+ arr = SparseArray(["A", "A", np.nan, "B"], dtype=object, fill_value="A")
70
+ assert arr.dtype == SparseDtype(object, "A")
71
+ assert arr.fill_value == "A"
72
+
73
+ def test_constructor_object_dtype_bool_fill(self):
74
+ # GH#17574
75
+ data = [False, 0, 100.0, 0.0]
76
+ arr = SparseArray(data, dtype=object, fill_value=False)
77
+ assert arr.dtype == SparseDtype(object, False)
78
+ assert arr.fill_value is False
79
+ arr_expected = np.array(data, dtype=object)
80
+ it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
81
+ assert np.fromiter(it, dtype=np.bool_).all()
82
+
83
+ @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
84
+ def test_constructor_na_dtype(self, dtype):
85
+ with pytest.raises(ValueError, match="Cannot convert"):
86
+ SparseArray([0, 1, np.nan], dtype=dtype)
87
+
88
+ def test_constructor_warns_when_losing_timezone(self):
89
+ # GH#32501 warn when losing timezone information
90
+ dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
91
+
92
+ expected = SparseArray(np.asarray(dti, dtype="datetime64[ns]"))
93
+
94
+ with tm.assert_produces_warning(UserWarning):
95
+ result = SparseArray(dti)
96
+
97
+ tm.assert_sp_array_equal(result, expected)
98
+
99
+ with tm.assert_produces_warning(UserWarning):
100
+ result = SparseArray(pd.Series(dti))
101
+
102
+ tm.assert_sp_array_equal(result, expected)
103
+
104
+ def test_constructor_spindex_dtype(self):
105
+ arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
106
+ # TODO: actionable?
107
+ # XXX: Behavior change: specifying SparseIndex no longer changes the
108
+ # fill_value
109
+ expected = SparseArray([0, 1, 2, 0], kind="integer")
110
+ tm.assert_sp_array_equal(arr, expected)
111
+ assert arr.dtype == SparseDtype(np.int64)
112
+ assert arr.fill_value == 0
113
+
114
+ arr = SparseArray(
115
+ data=[1, 2, 3],
116
+ sparse_index=IntIndex(4, [1, 2, 3]),
117
+ dtype=np.int64,
118
+ fill_value=0,
119
+ )
120
+ exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
121
+ tm.assert_sp_array_equal(arr, exp)
122
+ assert arr.dtype == SparseDtype(np.int64)
123
+ assert arr.fill_value == 0
124
+
125
+ arr = SparseArray(
126
+ data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64
127
+ )
128
+ exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
129
+ tm.assert_sp_array_equal(arr, exp)
130
+ assert arr.dtype == SparseDtype(np.int64)
131
+ assert arr.fill_value == 0
132
+
133
+ arr = SparseArray(
134
+ data=[1, 2, 3],
135
+ sparse_index=IntIndex(4, [1, 2, 3]),
136
+ dtype=None,
137
+ fill_value=0,
138
+ )
139
+ exp = SparseArray([0, 1, 2, 3], dtype=None)
140
+ tm.assert_sp_array_equal(arr, exp)
141
+ assert arr.dtype == SparseDtype(np.int64)
142
+ assert arr.fill_value == 0
143
+
144
+ @pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])])
145
+ def test_constructor_spindex_dtype_scalar(self, sparse_index):
146
+ # scalar input
147
+ msg = "Constructing SparseArray with scalar data is deprecated"
148
+ with tm.assert_produces_warning(FutureWarning, match=msg):
149
+ arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
150
+ exp = SparseArray([1], dtype=None)
151
+ tm.assert_sp_array_equal(arr, exp)
152
+ assert arr.dtype == SparseDtype(np.int64)
153
+ assert arr.fill_value == 0
154
+
155
+ with tm.assert_produces_warning(FutureWarning, match=msg):
156
+ arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
157
+ exp = SparseArray([1], dtype=None)
158
+ tm.assert_sp_array_equal(arr, exp)
159
+ assert arr.dtype == SparseDtype(np.int64)
160
+ assert arr.fill_value == 0
161
+
162
+ def test_constructor_spindex_dtype_scalar_broadcasts(self):
163
+ arr = SparseArray(
164
+ data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None
165
+ )
166
+ exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
167
+ tm.assert_sp_array_equal(arr, exp)
168
+ assert arr.dtype == SparseDtype(np.int64)
169
+ assert arr.fill_value == 0
170
+
171
+ @pytest.mark.parametrize(
172
+ "data, fill_value",
173
+ [
174
+ (np.array([1, 2]), 0),
175
+ (np.array([1.0, 2.0]), np.nan),
176
+ ([True, False], False),
177
+ ([pd.Timestamp("2017-01-01")], pd.NaT),
178
+ ],
179
+ )
180
+ def test_constructor_inferred_fill_value(self, data, fill_value):
181
+ result = SparseArray(data).fill_value
182
+
183
+ if isna(fill_value):
184
+ assert isna(result)
185
+ else:
186
+ assert result == fill_value
187
+
188
+ @pytest.mark.parametrize("format", ["coo", "csc", "csr"])
189
+ @pytest.mark.parametrize("size", [0, 10])
190
+ def test_from_spmatrix(self, size, format):
191
+ sp_sparse = pytest.importorskip("scipy.sparse")
192
+
193
+ mat = sp_sparse.random(size, 1, density=0.5, format=format)
194
+ result = SparseArray.from_spmatrix(mat)
195
+
196
+ result = np.asarray(result)
197
+ expected = mat.toarray().ravel()
198
+ tm.assert_numpy_array_equal(result, expected)
199
+
200
+ @pytest.mark.parametrize("format", ["coo", "csc", "csr"])
201
+ def test_from_spmatrix_including_explicit_zero(self, format):
202
+ sp_sparse = pytest.importorskip("scipy.sparse")
203
+
204
+ mat = sp_sparse.random(10, 1, density=0.5, format=format)
205
+ mat.data[0] = 0
206
+ result = SparseArray.from_spmatrix(mat)
207
+
208
+ result = np.asarray(result)
209
+ expected = mat.toarray().ravel()
210
+ tm.assert_numpy_array_equal(result, expected)
211
+
212
+ def test_from_spmatrix_raises(self):
213
+ sp_sparse = pytest.importorskip("scipy.sparse")
214
+
215
+ mat = sp_sparse.eye(5, 4, format="csc")
216
+
217
+ with pytest.raises(ValueError, match="not '4'"):
218
+ SparseArray.from_spmatrix(mat)
219
+
220
+ def test_constructor_from_too_large_array(self):
221
+ with pytest.raises(TypeError, match="expected dimension <= 1 data"):
222
+ SparseArray(np.arange(10).reshape((2, 5)))
223
+
224
+ def test_constructor_from_sparse(self):
225
+ zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
226
+ res = SparseArray(zarr)
227
+ assert res.fill_value == 0
228
+ tm.assert_almost_equal(res.sp_values, zarr.sp_values)
229
+
230
+ def test_constructor_copy(self):
231
+ arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
232
+ arr = SparseArray(arr_data)
233
+
234
+ cp = SparseArray(arr, copy=True)
235
+ cp.sp_values[:3] = 0
236
+ assert not (arr.sp_values[:3] == 0).any()
237
+
238
+ not_copy = SparseArray(arr)
239
+ not_copy.sp_values[:3] = 0
240
+ assert (arr.sp_values[:3] == 0).all()
241
+
242
+ def test_constructor_bool(self):
243
+ # GH#10648
244
+ data = np.array([False, False, True, True, False, False])
245
+ arr = SparseArray(data, fill_value=False, dtype=bool)
246
+
247
+ assert arr.dtype == SparseDtype(bool)
248
+ tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
249
+ # Behavior change: np.asarray densifies.
250
+ # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
251
+ tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32))
252
+
253
+ dense = arr.to_dense()
254
+ assert dense.dtype == bool
255
+ tm.assert_numpy_array_equal(dense, data)
256
+
257
+ def test_constructor_bool_fill_value(self):
258
+ arr = SparseArray([True, False, True], dtype=None)
259
+ assert arr.dtype == SparseDtype(np.bool_)
260
+ assert not arr.fill_value
261
+
262
+ arr = SparseArray([True, False, True], dtype=np.bool_)
263
+ assert arr.dtype == SparseDtype(np.bool_)
264
+ assert not arr.fill_value
265
+
266
+ arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True)
267
+ assert arr.dtype == SparseDtype(np.bool_, True)
268
+ assert arr.fill_value
269
+
270
+ def test_constructor_float32(self):
271
+ # GH#10648
272
+ data = np.array([1.0, np.nan, 3], dtype=np.float32)
273
+ arr = SparseArray(data, dtype=np.float32)
274
+
275
+ assert arr.dtype == SparseDtype(np.float32)
276
+ tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32))
277
+ # Behavior change: np.asarray densifies.
278
+ # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
279
+ tm.assert_numpy_array_equal(
280
+ arr.sp_index.indices, np.array([0, 2], dtype=np.int32)
281
+ )
282
+
283
+ dense = arr.to_dense()
284
+ assert dense.dtype == np.float32
285
+ tm.assert_numpy_array_equal(dense, data)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
import warnings

import numpy as np
import pytest

import pandas as pd
from pandas import SparseDtype


@pytest.mark.parametrize(
    "dtype, fill_value",
    [
        ("int", 0),
        ("float", np.nan),
        ("bool", False),
        ("object", np.nan),
        ("datetime64[ns]", np.datetime64("NaT", "ns")),
        ("timedelta64[ns]", np.timedelta64("NaT", "ns")),
    ],
)
def test_inferred_dtype(dtype, fill_value):
    """SparseDtype infers the canonical fill_value for each subtype."""
    sparse_dtype = SparseDtype(dtype)
    result = sparse_dtype.fill_value
    if pd.isna(fill_value):
        # NA fill values must match in kind as well (e.g. NaT vs nan),
        # so the exact type is compared, not just NA-ness.
        assert pd.isna(result) and type(result) == type(fill_value)
    else:
        assert result == fill_value
29
+
30
+
31
def test_from_sparse_dtype():
    """Building a SparseDtype from another SparseDtype keeps its fill_value."""
    original = SparseDtype("float", 0)
    rebuilt = SparseDtype(original)
    assert rebuilt.fill_value == 0
35
+
36
+
37
def test_from_sparse_dtype_fill_value():
    """An explicit fill_value overrides the one on the source SparseDtype."""
    source = SparseDtype("int", 1)
    overridden = SparseDtype(source, fill_value=2)
    assert overridden == SparseDtype("int", 2)
42
+
43
+
44
@pytest.mark.parametrize(
    "dtype, fill_value",
    [
        ("int", None),
        ("float", None),
        ("bool", None),
        ("object", None),
        ("datetime64[ns]", None),
        ("timedelta64[ns]", None),
        ("int", np.nan),
        ("float", 0),
    ],
)
def test_equal(dtype, fill_value):
    """Two SparseDtypes built from identical arguments compare equal both ways."""
    a = SparseDtype(dtype, fill_value)
    b = SparseDtype(dtype, fill_value)
    # Equality must be symmetric.
    assert a == b
    assert b == a
62
+
63
+
64
def test_nans_equal():
    """NaN fill_values compare equal regardless of how the NaN was produced."""
    left = SparseDtype(float, float("nan"))
    right = SparseDtype(float, np.nan)
    # Symmetric equality despite nan != nan for plain floats.
    assert left == right
    assert right == left
69
+
70
+
71
# Build the inequality cases up front. Constructing SparseDtype(int, pd.NaT)
# emits a FutureWarning about arbitrary scalar fill_values, so the pairs are
# assembled with that specific warning suppressed.
with warnings.catch_warnings():
    msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated"
    warnings.filterwarnings("ignore", msg, category=FutureWarning)

    tups = [
        (SparseDtype("float64"), SparseDtype("float32")),
        (SparseDtype("float64"), SparseDtype("float64", 0)),
        (SparseDtype("float64"), SparseDtype("datetime64[ns]", np.nan)),
        (SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)),
        (SparseDtype("float64"), np.dtype("float64")),
    ]


@pytest.mark.parametrize(
    "a, b",
    tups,
)
def test_not_equal(a, b):
    """Pairs differing in subtype, fill_value, or sparseness are unequal."""
    assert a != b


def test_construct_from_string_raises():
    """A non-dtype string is rejected with a TypeError."""
    with pytest.raises(
        TypeError, match="Cannot construct a 'SparseDtype' from 'not a dtype'"
    ):
        SparseDtype.construct_from_string("not a dtype")
97
+
98
+
99
@pytest.mark.parametrize(
    "dtype, expected",
    [
        (SparseDtype(int), True),
        (SparseDtype(float), True),
        (SparseDtype(bool), True),
        (SparseDtype(object), False),
        (SparseDtype(str), False),
    ],
)
def test_is_numeric(dtype, expected):
    """_is_numeric is True for int/float/bool subtypes, False for object-like."""
    assert dtype._is_numeric is expected
111
+
112
+
113
def test_str_uses_object():
    """A SparseDtype built from ``str`` stores its subtype as object dtype."""
    subtype = SparseDtype(str).subtype
    assert subtype == np.dtype("object")
116
+
117
+
118
@pytest.mark.parametrize(
    "string, expected",
    [
        ("Sparse[float64]", SparseDtype(np.dtype("float64"))),
        ("Sparse[float32]", SparseDtype(np.dtype("float32"))),
        ("Sparse[int]", SparseDtype(np.dtype("int"))),
        ("Sparse[str]", SparseDtype(np.dtype("str"))),
        ("Sparse[datetime64[ns]]", SparseDtype(np.dtype("datetime64[ns]"))),
        # Bare "Sparse" defaults to float64 with NaN fill.
        ("Sparse", SparseDtype(np.dtype("float"), np.nan)),
    ],
)
def test_construct_from_string(string, expected):
    """Valid 'Sparse[...]' strings round-trip into the matching SparseDtype."""
    result = SparseDtype.construct_from_string(string)
    assert result == expected
132
+
133
+
134
@pytest.mark.parametrize(
    "a, b, expected",
    [
        (SparseDtype(float, 0.0), SparseDtype(np.dtype("float"), 0.0), True),
        (SparseDtype(int, 0), SparseDtype(int, 0), True),
        (SparseDtype(float, float("nan")), SparseDtype(float, np.nan), True),
        (SparseDtype(float, 0), SparseDtype(float, np.nan), False),
        (SparseDtype(int, 0.0), SparseDtype(float, 0.0), False),
    ],
)
def test_hash_equal(a, b, expected):
    """Hash agreement must track equality (hash contract)."""
    result = a == b
    assert result is expected

    # Equal objects must hash equal; unequal ones are expected to differ here.
    result = hash(a) == hash(b)
    assert result is expected
150
+
151
+
152
@pytest.mark.parametrize(
    "string, expected",
    [
        ("Sparse[int]", "int"),
        ("Sparse[int, 0]", "int"),
        ("Sparse[int64]", "int64"),
        ("Sparse[int64, 0]", "int64"),
        ("Sparse[datetime64[ns], 0]", "datetime64[ns]"),
    ],
)
def test_parse_subtype(string, expected):
    """_parse_subtype extracts the subtype name, ignoring any fill_value part."""
    subtype, _ = SparseDtype._parse_subtype(string)
    assert subtype == expected


@pytest.mark.parametrize(
    "string", ["Sparse[int, 1]", "Sparse[float, 0.0]", "Sparse[bool, True]"]
)
def test_construct_from_string_fill_value_raises(string):
    """construct_from_string refuses strings that embed a fill_value."""
    with pytest.raises(TypeError, match="fill_value in the string is not"):
        SparseDtype.construct_from_string(string)
173
+
174
+
175
@pytest.mark.parametrize(
    "original, dtype, expected",
    [
        (SparseDtype(int, 0), float, SparseDtype(float, 0.0)),
        (SparseDtype(int, 1), float, SparseDtype(float, 1.0)),
        # Casting to str goes through object dtype, stringifying the fill_value.
        (SparseDtype(int, 1), str, SparseDtype(object, "1")),
        # float -> int truncates the fill_value.
        (SparseDtype(float, 1.5), int, SparseDtype(int, 1)),
    ],
)
def test_update_dtype(original, dtype, expected):
    """update_dtype converts subtype and fill_value together."""
    result = original.update_dtype(dtype)
    assert result == expected


@pytest.mark.parametrize(
    "original, dtype, expected_error_msg",
    [
        (
            SparseDtype(float, np.nan),
            int,
            # NaN fill_value cannot be represented as an integer.
            re.escape("Cannot convert non-finite values (NA or inf) to integer"),
        ),
        (
            SparseDtype(str, "abc"),
            int,
            # Message differs across numpy versions; accept both reprs.
            r"invalid literal for int\(\) with base 10: ('abc'|np\.str_\('abc'\))",
        ),
    ],
)
def test_update_dtype_raises(original, dtype, expected_error_msg):
    """update_dtype raises when the fill_value cannot be converted."""
    with pytest.raises(ValueError, match=expected_error_msg):
        original.update_dtype(dtype)
207
+
208
+
209
def test_repr():
    """str() of a SparseDtype renders as ``Sparse[<subtype>, <fill_value>]``."""
    # GH-34352
    cases = [
        (SparseDtype("int64", fill_value=0), "Sparse[int64, 0]"),
        # String fill_values are shown with quotes.
        (SparseDtype(object, fill_value="0"), "Sparse[object, '0']"),
    ]
    for dtype, expected in cases:
        assert str(dtype) == expected
218
+
219
+
220
def test_sparse_dtype_subtype_must_be_numpy_dtype():
    """Extension dtypes like 'category' are rejected as SparseDtype subtypes."""
    # GH#53160
    msg = "SparseDtype subtype must be a numpy dtype"
    with pytest.raises(TypeError, match=msg):
        SparseDtype("category", fill_value="c")
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np
import pytest

import pandas as pd
from pandas import SparseDtype
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray


@pytest.fixture
def arr_data():
    """Dense float data with NaNs in the default-fill positions."""
    return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])


@pytest.fixture
def arr(arr_data):
    """SparseArray built from arr_data (NaN fill value)."""
    return SparseArray(arr_data)
18
+
19
+
20
class TestGetitem:
    """Scalar, slice, and boolean-mask indexing on SparseArray."""

    def test_getitem(self, arr):
        # Every position (and its negative mirror) must match the dense form.
        dense = arr.to_dense()
        for i, value in enumerate(arr):
            tm.assert_almost_equal(value, dense[i])
            tm.assert_almost_equal(arr[-i], dense[-i])

    def test_getitem_arraylike_mask(self, arr):
        # Plain-list boolean masks select like numpy.
        arr = SparseArray([0, 1, 2])
        result = arr[[True, False, True]]
        expected = SparseArray([0, 2])
        tm.assert_sp_array_equal(result, expected)

    @pytest.mark.parametrize(
        "slc",
        [
            np.s_[:],
            np.s_[1:10],
            np.s_[1:100],
            np.s_[10:1],
            np.s_[:-3],
            np.s_[-5:-4],
            np.s_[:-12],
            np.s_[-12:],
            np.s_[2:],
            np.s_[2::3],
            np.s_[::2],
            np.s_[::-1],
            np.s_[::-2],
            np.s_[1:6:2],
            np.s_[:-6:-2],
        ],
    )
    @pytest.mark.parametrize(
        "as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []]
    )
    def test_getslice(self, slc, as_dense):
        # Slicing a SparseArray must agree with slicing its dense equivalent
        # for every combination of slice shape and fill pattern.
        as_dense = np.array(as_dense)
        arr = SparseArray(as_dense)

        result = arr[slc]
        expected = SparseArray(as_dense[slc])

        tm.assert_sp_array_equal(result, expected)

    def test_getslice_tuple(self):
        dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])

        # 1-tuples of slices are accepted, like numpy.
        sparse = SparseArray(dense)
        res = sparse[(slice(4, None),)]
        exp = SparseArray(dense[4:])
        tm.assert_sp_array_equal(res, exp)

        sparse = SparseArray(dense, fill_value=0)
        res = sparse[(slice(4, None),)]
        exp = SparseArray(dense[4:], fill_value=0)
        tm.assert_sp_array_equal(res, exp)

        # 2-D indexing on a 1-D array raises, matching numpy.
        msg = "too many indices for array"
        with pytest.raises(IndexError, match=msg):
            sparse[4:, :]

        with pytest.raises(IndexError, match=msg):
            # check numpy compat
            dense[4:, :]

    def test_boolean_slice_empty(self):
        # An all-False mask keeps the dtype even though nothing is selected.
        arr = SparseArray([0, 1, 2])
        res = arr[[False, False, False]]
        assert res.dtype == arr.dtype

    def test_getitem_bool_sparse_array(self, arr):
        # GH 23122
        spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True)
        exp = SparseArray([np.nan, 2, np.nan, 5, 6])
        tm.assert_sp_array_equal(arr[spar_bool], exp)

        # Inverting the mask selects the complementary positions.
        spar_bool = ~spar_bool
        res = arr[spar_bool]
        exp = SparseArray([np.nan, 1, 3, 4, np.nan])
        tm.assert_sp_array_equal(res, exp)

        # NaN entries in a boolean mask are treated as not-selected.
        spar_bool = SparseArray(
            [False, True, np.nan] * 3, dtype=np.bool_, fill_value=np.nan
        )
        res = arr[spar_bool]
        exp = SparseArray([np.nan, 3, 5])
        tm.assert_sp_array_equal(res, exp)

    def test_getitem_bool_sparse_array_as_comparison(self):
        # GH 45110
        arr = SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan)
        res = arr[arr > 2]
        exp = SparseArray([3.0, 4.0], fill_value=np.nan)
        tm.assert_sp_array_equal(res, exp)

    def test_get_item(self, arr):
        # Scalar access works for both NaN-filled and zero-filled arrays.
        zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)

        assert np.isnan(arr[1])
        assert arr[2] == 1
        assert arr[7] == 5

        assert zarr[0] == 0
        assert zarr[2] == 1
        assert zarr[7] == 5

        # Out-of-bounds scalar access reports the valid range.
        errmsg = "must be an integer between -10 and 10"

        with pytest.raises(IndexError, match=errmsg):
            arr[11]

        with pytest.raises(IndexError, match=errmsg):
            arr[-11]

        assert arr[-1] == arr[len(arr) - 1]
136
+
137
+
138
class TestSetitem:
    """SparseArray is immutable via __setitem__; both forms must raise."""

    def test_set_item(self, arr_data):
        arr = SparseArray(arr_data).copy()

        # Wrapped in functions so pytest.raises covers exactly one statement.
        def setitem():
            arr[5] = 3

        def setslice():
            arr[1:5] = 2

        with pytest.raises(TypeError, match="assignment via setitem"):
            setitem()

        with pytest.raises(TypeError, match="assignment via setitem"):
            setslice()
153
+
154
+
155
class TestTake:
    """take() semantics: positional indexing, allow_fill, and bounds checks."""

    def test_take_scalar_raises(self, arr):
        # take requires an array of indices, never a bare scalar.
        msg = "'indices' must be an array, not a scalar '2'."
        with pytest.raises(ValueError, match=msg):
            arr.take(2)

    def test_take(self, arr_data, arr):
        exp = SparseArray(np.take(arr_data, [2, 3]))
        tm.assert_sp_array_equal(arr.take([2, 3]), exp)

        exp = SparseArray(np.take(arr_data, [0, 1, 2]))
        tm.assert_sp_array_equal(arr.take([0, 1, 2]), exp)

    def test_take_all_empty(self):
        # Taking every element of an all-fill array is a no-op.
        sparse = pd.array([0, 0], dtype=SparseDtype("int64"))
        result = sparse.take([0, 1], allow_fill=True, fill_value=np.nan)
        tm.assert_sp_array_equal(sparse, result)

    def test_take_different_fill_value(self):
        # Take with a different fill value shouldn't overwrite the original
        sparse = pd.array([0.0], dtype=SparseDtype("float64", fill_value=0.0))
        result = sparse.take([0, -1], allow_fill=True, fill_value=np.nan)
        expected = pd.array([0, np.nan], dtype=sparse.dtype)
        tm.assert_sp_array_equal(expected, result)

    def test_take_fill_value(self):
        data = np.array([1, np.nan, 0, 3, 0])
        sparse = SparseArray(data, fill_value=0)

        exp = SparseArray(np.take(data, [0]), fill_value=0)
        tm.assert_sp_array_equal(sparse.take([0]), exp)

        exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
        tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)

    def test_take_negative(self, arr_data, arr):
        # Without allow_fill, negative indices count from the end (numpy-style).
        exp = SparseArray(np.take(arr_data, [-1]))
        tm.assert_sp_array_equal(arr.take([-1]), exp)

        exp = SparseArray(np.take(arr_data, [-4, -3, -2]))
        tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp)

    def test_bad_take(self, arr):
        with pytest.raises(IndexError, match="bounds"):
            arr.take([11])

    def test_take_filling(self):
        # similar tests as GH 12631
        sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
        result = sparse.take(np.array([1, 0, -1]))
        expected = SparseArray([np.nan, np.nan, 4])
        tm.assert_sp_array_equal(result, expected)

        # TODO: actionable?
        # XXX: test change: fill_value=True -> allow_fill=True
        # With allow_fill, -1 means "missing" and is filled with NA.
        result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
        expected = SparseArray([np.nan, np.nan, np.nan])
        tm.assert_sp_array_equal(result, expected)

        # allow_fill=False
        # fill_value is ignored when allow_fill=False; -1 indexes from the end.
        result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
        expected = SparseArray([np.nan, np.nan, 4])
        tm.assert_sp_array_equal(result, expected)

        # With allow_fill, only -1 is a valid "missing" marker.
        msg = "Invalid value in 'indices'"
        with pytest.raises(ValueError, match=msg):
            sparse.take(np.array([1, 0, -2]), allow_fill=True)

        with pytest.raises(ValueError, match=msg):
            sparse.take(np.array([1, 0, -5]), allow_fill=True)

        msg = "out of bounds value in 'indices'"
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, -6]))
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, 5]))
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, 5]), allow_fill=True)

    def test_take_filling_fill_value(self):
        # same tests as GH#12631
        sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
        result = sparse.take(np.array([1, 0, -1]))
        expected = SparseArray([0, np.nan, 4], fill_value=0)
        tm.assert_sp_array_equal(result, expected)

        # fill_value
        result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
        # TODO: actionable?
        # XXX: behavior change.
        # the old way of filling self.fill_value doesn't follow EA rules.
        # It's supposed to be self.dtype.na_value (nan in this case)
        expected = SparseArray([0, np.nan, np.nan], fill_value=0)
        tm.assert_sp_array_equal(result, expected)

        # allow_fill=False
        result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
        expected = SparseArray([0, np.nan, 4], fill_value=0)
        tm.assert_sp_array_equal(result, expected)

        msg = "Invalid value in 'indices'."
        with pytest.raises(ValueError, match=msg):
            sparse.take(np.array([1, 0, -2]), allow_fill=True)
        with pytest.raises(ValueError, match=msg):
            sparse.take(np.array([1, 0, -5]), allow_fill=True)

        msg = "out of bounds value in 'indices'"
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, -6]))
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, 5]))
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, 5]), fill_value=True)

    @pytest.mark.parametrize("kind", ["block", "integer"])
    def test_take_filling_all_nan(self, kind):
        # All-NaN arrays keep their index kind through take.
        sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan], kind=kind)
        result = sparse.take(np.array([1, 0, -1]))
        expected = SparseArray([np.nan, np.nan, np.nan], kind=kind)
        tm.assert_sp_array_equal(result, expected)

        result = sparse.take(np.array([1, 0, -1]), fill_value=True)
        expected = SparseArray([np.nan, np.nan, np.nan], kind=kind)
        tm.assert_sp_array_equal(result, expected)

        msg = "out of bounds value in 'indices'"
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, -6]))
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, 5]))
        with pytest.raises(IndexError, match=msg):
            sparse.take(np.array([1, 5]), fill_value=True)
287
+
288
+
289
class TestWhere:
    """_where / Series.where must not drop the SparseArray's fill_value."""

    def test_where_retain_fill_value(self):
        # GH#45691 don't lose fill_value on _where
        arr = SparseArray([np.nan, 1.0], fill_value=0)

        mask = np.array([True, False])

        res = arr._where(~mask, 1)
        exp = SparseArray([1, 1.0], fill_value=0)
        tm.assert_sp_array_equal(res, exp)

        # Same behavior through the public Series.where path.
        ser = pd.Series(arr)
        res = ser.where(~mask, 1)
        tm.assert_series_equal(res, pd.Series(exp))
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py ADDED
@@ -0,0 +1,551 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import operator

import numpy as np
import pytest

import pandas._libs.sparse as splib
import pandas.util._test_decorators as td

from pandas import Series
import pandas._testing as tm
from pandas.core.arrays.sparse import (
    BlockIndex,
    IntIndex,
    make_sparse_index,
)


@pytest.fixture
def test_length():
    """Underlying length shared by the BlockIndex fixtures below."""
    return 20


# Each param is [xloc, xlen, yloc, ylen, eloc, elen]: the block locations and
# lengths of two indexes plus the expected locations/lengths of their
# intersection.
@pytest.fixture(
    params=[
        [
            [0, 7, 15],
            [3, 5, 5],
            [2, 9, 14],
            [2, 3, 5],
            [2, 9, 15],
            [1, 3, 4],
        ],
        [
            [0, 5],
            [4, 4],
            [1],
            [4],
            [1],
            [3],
        ],
        [
            [0],
            [10],
            [0, 5],
            [3, 7],
            [0, 5],
            [3, 5],
        ],
        [
            [10],
            [5],
            [0, 12],
            [5, 3],
            [12],
            [3],
        ],
        [
            [0, 10],
            [4, 6],
            [5, 17],
            [4, 2],
            [],
            [],
        ],
        [
            [0],
            [5],
            [],
            [],
            [],
            [],
        ],
    ],
    ids=[
        "plain_case",
        "delete_blocks",
        "split_blocks",
        "skip_block",
        "no_intersect",
        "one_empty",
    ],
)
def cases(request):
    return request.param
85
+
86
+
87
class TestSparseIndexUnion:
    """make_union on BlockIndex/IntIndex pairs."""

    @pytest.mark.parametrize(
        "xloc, xlen, yloc, ylen, eloc, elen",
        [
            [[0], [5], [5], [4], [0], [9]],
            [[0, 10], [5, 5], [2, 17], [5, 2], [0, 10, 17], [7, 5, 2]],
            [[1], [5], [3], [5], [1], [7]],
            [[2, 10], [4, 4], [4], [8], [2], [12]],
            [[0, 5], [3, 5], [0], [7], [0], [10]],
            [[2, 10], [4, 4], [4, 13], [8, 4], [2], [15]],
            [[2], [15], [4, 9, 14], [3, 2, 2], [2], [15]],
            [[0, 10], [3, 3], [5, 15], [2, 2], [0, 5, 10, 15], [3, 2, 3, 2]],
        ],
    )
    def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen, test_length):
        # The diagrams below illustrate each parametrized case.
        # Case 1
        # x: ----
        # y:     ----
        # r: --------
        # Case 2
        # x: -----     -----
        # y:   -----          --
        # Case 3
        # x: ------
        # y:    -------
        # r: ----------
        # Case 4
        # x: ------  -----
        # y:    -------
        # r: -------------
        # Case 5
        # x: ---  -----
        # y: -------
        # r: -------------
        # Case 6
        # x: ------  -----
        # y:    -------  ---
        # r: -------------
        # Case 7
        # x: ----------------------
        # y:   ----  ----   ---
        # r: ----------------------
        # Case 8
        # x: ----       ---
        # y:       ---       ---
        xindex = BlockIndex(test_length, xloc, xlen)
        yindex = BlockIndex(test_length, yloc, ylen)
        bresult = xindex.make_union(yindex)
        assert isinstance(bresult, BlockIndex)
        tm.assert_numpy_array_equal(bresult.blocs, np.array(eloc, dtype=np.int32))
        tm.assert_numpy_array_equal(bresult.blengths, np.array(elen, dtype=np.int32))

        # The int-index union must select exactly the same positions.
        ixindex = xindex.to_int_index()
        iyindex = yindex.to_int_index()
        iresult = ixindex.make_union(iyindex)
        assert isinstance(iresult, IntIndex)
        tm.assert_numpy_array_equal(iresult.indices, bresult.to_int_index().indices)

    def test_int_index_make_union(self):
        # Overlapping indices merge and deduplicate.
        a = IntIndex(5, np.array([0, 3, 4], dtype=np.int32))
        b = IntIndex(5, np.array([0, 2], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32))
        assert res.equals(exp)

        # Union with an empty index is the other index.
        a = IntIndex(5, np.array([], dtype=np.int32))
        b = IntIndex(5, np.array([0, 2], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([0, 2], np.int32))
        assert res.equals(exp)

        # Union of two empty indices stays empty.
        a = IntIndex(5, np.array([], dtype=np.int32))
        b = IntIndex(5, np.array([], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([], np.int32))
        assert res.equals(exp)

        # Union of identical full indices is unchanged.
        a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32))
        b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32))
        assert res.equals(exp)

        # Mismatched underlying lengths are rejected.
        a = IntIndex(5, np.array([0, 1], dtype=np.int32))
        b = IntIndex(4, np.array([0, 1], dtype=np.int32))

        msg = "Indices must reference same underlying length"
        with pytest.raises(ValueError, match=msg):
            a.make_union(b)
176
+
177
+
178
class TestSparseIndexIntersect:
    """intersect() on BlockIndex/IntIndex pairs."""

    @td.skip_if_windows
    def test_intersect(self, cases, test_length):
        # Unpack [xloc, xlen, yloc, ylen, expected loc, expected len].
        xloc, xlen, yloc, ylen, eloc, elen = cases
        xindex = BlockIndex(test_length, xloc, xlen)
        yindex = BlockIndex(test_length, yloc, ylen)
        expected = BlockIndex(test_length, eloc, elen)
        longer_index = BlockIndex(test_length + 1, yloc, ylen)

        result = xindex.intersect(yindex)
        assert result.equals(expected)
        # Same result via the integer-index representation.
        result = xindex.to_int_index().intersect(yindex.to_int_index())
        assert result.equals(expected.to_int_index())

        # Mismatched underlying lengths are rejected on both paths.
        msg = "Indices must reference same underlying length"
        with pytest.raises(Exception, match=msg):
            xindex.intersect(longer_index)
        with pytest.raises(Exception, match=msg):
            xindex.to_int_index().intersect(longer_index.to_int_index())

    def test_intersect_empty(self):
        # Intersection with an empty index is empty, in either order.
        xindex = IntIndex(4, np.array([], dtype=np.int32))
        yindex = IntIndex(4, np.array([2, 3], dtype=np.int32))
        assert xindex.intersect(yindex).equals(xindex)
        assert yindex.intersect(xindex).equals(xindex)

        xindex = xindex.to_block_index()
        yindex = yindex.to_block_index()
        assert xindex.intersect(yindex).equals(xindex)
        assert yindex.intersect(xindex).equals(xindex)

    @pytest.mark.parametrize(
        "case",
        [
            # Argument 2 to "IntIndex" has incompatible type "ndarray[Any,
            # dtype[signedinteger[_32Bit]]]"; expected "Sequence[int]"
            IntIndex(5, np.array([1, 2], dtype=np.int32)),  # type: ignore[arg-type]
            IntIndex(5, np.array([0, 2, 4], dtype=np.int32)),  # type: ignore[arg-type]
            IntIndex(0, np.array([], dtype=np.int32)),  # type: ignore[arg-type]
            IntIndex(5, np.array([], dtype=np.int32)),  # type: ignore[arg-type]
        ],
    )
    def test_intersect_identical(self, case):
        # An index intersected with itself is itself, for both kinds.
        assert case.intersect(case).equals(case)
        case = case.to_block_index()
        assert case.intersect(case).equals(case)
224
+
225
+
226
class TestSparseIndexCommon:
    """Behavior shared by IntIndex and BlockIndex: construction and lookup."""

    def test_int_internal(self):
        idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer")
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32))

        # Empty index.
        idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer")
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32))

        # Fully dense index.
        idx = make_sparse_index(
            4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer"
        )
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32))

    def test_block_internal(self):
        # Consecutive positions collapse into a single block.
        idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32))

        idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32))

        idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32))

        # A gap splits the positions into two blocks.
        idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 3
        tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32))

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_lookup(self, kind):
        # lookup maps a dense position to its sparse-storage slot, -1 if absent.
        idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind)
        assert idx.lookup(-1) == -1
        assert idx.lookup(0) == -1
        assert idx.lookup(1) == -1
        assert idx.lookup(2) == 0
        assert idx.lookup(3) == 1
        assert idx.lookup(4) == -1

        # Empty index: every lookup misses.
        idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind)

        for i in range(-1, 5):
            assert idx.lookup(i) == -1

        # Dense index: positions map to themselves within bounds.
        idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind)
        assert idx.lookup(-1) == -1
        assert idx.lookup(0) == 0
        assert idx.lookup(1) == 1
        assert idx.lookup(2) == 2
        assert idx.lookup(3) == 3
        assert idx.lookup(4) == -1

        idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind)
        assert idx.lookup(-1) == -1
        assert idx.lookup(0) == 0
        assert idx.lookup(1) == -1
        assert idx.lookup(2) == 1
        assert idx.lookup(3) == 2
        assert idx.lookup(4) == -1

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_lookup_array(self, kind):
        # Vectorized form of lookup over an array of positions.
        idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind)

        res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32))
        exp = np.array([-1, -1, 0], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

        res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32))
        exp = np.array([-1, 0, -1, 1], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

        idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind)
        res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32))
        exp = np.array([-1, -1, -1, -1], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

        idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind)
        res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32))
        exp = np.array([-1, 0, 2], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

        res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32))
        exp = np.array([-1, 2, 1, 3], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

        idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind)
        res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32))
        exp = np.array([1, -1, 2, 0], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

        res = idx.lookup_array(np.array([1, 4, 2, 5], dtype=np.int32))
        exp = np.array([-1, -1, 1, -1], dtype=np.int32)
        tm.assert_numpy_array_equal(res, exp)

    @pytest.mark.parametrize(
        "idx, expected",
        [
            [0, -1],
            [5, 0],
            [7, 2],
            [8, -1],
            [9, -1],
            [10, -1],
            [11, -1],
            [12, 3],
            [17, 8],
            [18, -1],
        ],
    )
    def test_lookup_basics(self, idx, expected):
        # Blocks at [5, 8) and [12, 18); positions inside map to storage slots.
        bindex = BlockIndex(20, [5, 12], [3, 6])
        assert bindex.lookup(idx) == expected

        # The equivalent IntIndex must agree.
        iindex = bindex.to_int_index()
        assert iindex.lookup(idx) == expected
357
+
358
+
359
class TestBlockIndex:
    """BlockIndex-specific behavior: construction, integrity, conversion."""

    def test_block_internal(self):
        # Consecutive positions collapse into a single block.
        idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32))

        idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32))

        idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32))

        # A gap splits the positions into two blocks.
        idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block")
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 3
        tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32))

    @pytest.mark.parametrize("i", [5, 10, 100, 101])
    def test_make_block_boundary(self, i):
        # Every other position selected: each becomes a length-1 block.
        idx = make_sparse_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block")

        exp = np.arange(0, i, 2, dtype=np.int32)
        tm.assert_numpy_array_equal(idx.blocs, exp)
        tm.assert_numpy_array_equal(idx.blengths, np.ones(len(exp), dtype=np.int32))

    def test_equals(self):
        index = BlockIndex(10, [0, 4], [2, 5])

        assert index.equals(index)
        # Different block lengths -> not equal.
        assert not index.equals(BlockIndex(10, [0, 4], [2, 6]))

    def test_check_integrity(self):
        locs = []
        lengths = []

        # 0-length OK
        BlockIndex(0, locs, lengths)

        # also OK even though empty
        BlockIndex(1, locs, lengths)

        # A block running past the underlying length is invalid.
        msg = "Block 0 extends beyond end"
        with pytest.raises(ValueError, match=msg):
            BlockIndex(10, [5], [10])

        # Overlapping blocks are invalid.
        msg = "Block 0 overlaps"
        with pytest.raises(ValueError, match=msg):
            BlockIndex(10, [2, 5], [5, 3])

    def test_to_int_index(self):
        # Blocks [0, 4) and [10, 16) expand to their individual positions.
        locs = [0, 10]
        lengths = [4, 6]
        exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]

        block = BlockIndex(20, locs, lengths)
        dense = block.to_int_index()

        tm.assert_numpy_array_equal(dense.indices, np.array(exp_inds, dtype=np.int32))

    def test_to_block_index(self):
        # Converting a BlockIndex to block kind is a no-op returning self.
        index = BlockIndex(10, [0, 5], [4, 5])
        assert index.to_block_index() is index
430
+
431
+
432
+ class TestIntIndex:
433
+ def test_check_integrity(self):
434
+ # Too many indices than specified in self.length
435
+ msg = "Too many indices"
436
+
437
+ with pytest.raises(ValueError, match=msg):
438
+ IntIndex(length=1, indices=[1, 2, 3])
439
+
440
+ # No index can be negative.
441
+ msg = "No index can be less than zero"
442
+
443
+ with pytest.raises(ValueError, match=msg):
444
+ IntIndex(length=5, indices=[1, -2, 3])
445
+
446
+ # No index can be negative.
447
+ msg = "No index can be less than zero"
448
+
449
+ with pytest.raises(ValueError, match=msg):
450
+ IntIndex(length=5, indices=[1, -2, 3])
451
+
452
+ # All indices must be less than the length.
453
+ msg = "All indices must be less than the length"
454
+
455
+ with pytest.raises(ValueError, match=msg):
456
+ IntIndex(length=5, indices=[1, 2, 5])
457
+
458
+ with pytest.raises(ValueError, match=msg):
459
+ IntIndex(length=5, indices=[1, 2, 6])
460
+
461
+ # Indices must be strictly ascending.
462
+ msg = "Indices must be strictly increasing"
463
+
464
+ with pytest.raises(ValueError, match=msg):
465
+ IntIndex(length=5, indices=[1, 3, 2])
466
+
467
+ with pytest.raises(ValueError, match=msg):
468
+ IntIndex(length=5, indices=[1, 3, 3])
469
+
470
+ def test_int_internal(self):
471
+ idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer")
472
+ assert isinstance(idx, IntIndex)
473
+ assert idx.npoints == 2
474
+ tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32))
475
+
476
+ idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer")
477
+ assert isinstance(idx, IntIndex)
478
+ assert idx.npoints == 0
479
+ tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32))
480
+
481
+ idx = make_sparse_index(
482
+ 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer"
483
+ )
484
+ assert isinstance(idx, IntIndex)
485
+ assert idx.npoints == 4
486
+ tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32))
487
+
488
+ def test_equals(self):
489
+ index = IntIndex(10, [0, 1, 2, 3, 4])
490
+ assert index.equals(index)
491
+ assert not index.equals(IntIndex(10, [0, 1, 2, 3]))
492
+
493
+ def test_to_block_index(self, cases, test_length):
494
+ xloc, xlen, yloc, ylen, _, _ = cases
495
+ xindex = BlockIndex(test_length, xloc, xlen)
496
+ yindex = BlockIndex(test_length, yloc, ylen)
497
+
498
+ # see if survive the round trip
499
+ xbindex = xindex.to_int_index().to_block_index()
500
+ ybindex = yindex.to_int_index().to_block_index()
501
+ assert isinstance(xbindex, BlockIndex)
502
+ assert xbindex.equals(xindex)
503
+ assert ybindex.equals(yindex)
504
+
505
+ def test_to_int_index(self):
506
+ index = IntIndex(10, [2, 3, 4, 5, 6])
507
+ assert index.to_int_index() is index
508
+
509
+
510
+ class TestSparseOperators:
511
+ @pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"])
512
+ def test_op(self, opname, cases, test_length):
513
+ xloc, xlen, yloc, ylen, _, _ = cases
514
+ sparse_op = getattr(splib, f"sparse_{opname}_float64")
515
+ python_op = getattr(operator, opname)
516
+
517
+ xindex = BlockIndex(test_length, xloc, xlen)
518
+ yindex = BlockIndex(test_length, yloc, ylen)
519
+
520
+ xdindex = xindex.to_int_index()
521
+ ydindex = yindex.to_int_index()
522
+
523
+ x = np.arange(xindex.npoints) * 10.0 + 1
524
+ y = np.arange(yindex.npoints) * 100.0 + 1
525
+
526
+ xfill = 0
527
+ yfill = 2
528
+
529
+ result_block_vals, rb_index, bfill = sparse_op(
530
+ x, xindex, xfill, y, yindex, yfill
531
+ )
532
+ result_int_vals, ri_index, ifill = sparse_op(
533
+ x, xdindex, xfill, y, ydindex, yfill
534
+ )
535
+
536
+ assert rb_index.to_int_index().equals(ri_index)
537
+ tm.assert_numpy_array_equal(result_block_vals, result_int_vals)
538
+ assert bfill == ifill
539
+
540
+ # check versus Series...
541
+ xseries = Series(x, xdindex.indices)
542
+ xseries = xseries.reindex(np.arange(test_length)).fillna(xfill)
543
+
544
+ yseries = Series(y, ydindex.indices)
545
+ yseries = yseries.reindex(np.arange(test_length)).fillna(yfill)
546
+
547
+ series_result = python_op(xseries, yseries)
548
+ series_result = series_result.reindex(ri_index.indices)
549
+
550
+ tm.assert_numpy_array_equal(result_block_vals, series_result.values)
551
+ tm.assert_numpy_array_equal(result_int_vals, series_result.values)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ NaT,
6
+ SparseDtype,
7
+ Timestamp,
8
+ isna,
9
+ )
10
+ from pandas.core.arrays.sparse import SparseArray
11
+
12
+
13
+ class TestReductions:
14
+ @pytest.mark.parametrize(
15
+ "data,pos,neg",
16
+ [
17
+ ([True, True, True], True, False),
18
+ ([1, 2, 1], 1, 0),
19
+ ([1.0, 2.0, 1.0], 1.0, 0.0),
20
+ ],
21
+ )
22
+ def test_all(self, data, pos, neg):
23
+ # GH#17570
24
+ out = SparseArray(data).all()
25
+ assert out
26
+
27
+ out = SparseArray(data, fill_value=pos).all()
28
+ assert out
29
+
30
+ data[1] = neg
31
+ out = SparseArray(data).all()
32
+ assert not out
33
+
34
+ out = SparseArray(data, fill_value=pos).all()
35
+ assert not out
36
+
37
+ @pytest.mark.parametrize(
38
+ "data,pos,neg",
39
+ [
40
+ ([True, True, True], True, False),
41
+ ([1, 2, 1], 1, 0),
42
+ ([1.0, 2.0, 1.0], 1.0, 0.0),
43
+ ],
44
+ )
45
+ def test_numpy_all(self, data, pos, neg):
46
+ # GH#17570
47
+ out = np.all(SparseArray(data))
48
+ assert out
49
+
50
+ out = np.all(SparseArray(data, fill_value=pos))
51
+ assert out
52
+
53
+ data[1] = neg
54
+ out = np.all(SparseArray(data))
55
+ assert not out
56
+
57
+ out = np.all(SparseArray(data, fill_value=pos))
58
+ assert not out
59
+
60
+ # raises with a different message on py2.
61
+ msg = "the 'out' parameter is not supported"
62
+ with pytest.raises(ValueError, match=msg):
63
+ np.all(SparseArray(data), out=np.array([]))
64
+
65
+ @pytest.mark.parametrize(
66
+ "data,pos,neg",
67
+ [
68
+ ([False, True, False], True, False),
69
+ ([0, 2, 0], 2, 0),
70
+ ([0.0, 2.0, 0.0], 2.0, 0.0),
71
+ ],
72
+ )
73
+ def test_any(self, data, pos, neg):
74
+ # GH#17570
75
+ out = SparseArray(data).any()
76
+ assert out
77
+
78
+ out = SparseArray(data, fill_value=pos).any()
79
+ assert out
80
+
81
+ data[1] = neg
82
+ out = SparseArray(data).any()
83
+ assert not out
84
+
85
+ out = SparseArray(data, fill_value=pos).any()
86
+ assert not out
87
+
88
+ @pytest.mark.parametrize(
89
+ "data,pos,neg",
90
+ [
91
+ ([False, True, False], True, False),
92
+ ([0, 2, 0], 2, 0),
93
+ ([0.0, 2.0, 0.0], 2.0, 0.0),
94
+ ],
95
+ )
96
+ def test_numpy_any(self, data, pos, neg):
97
+ # GH#17570
98
+ out = np.any(SparseArray(data))
99
+ assert out
100
+
101
+ out = np.any(SparseArray(data, fill_value=pos))
102
+ assert out
103
+
104
+ data[1] = neg
105
+ out = np.any(SparseArray(data))
106
+ assert not out
107
+
108
+ out = np.any(SparseArray(data, fill_value=pos))
109
+ assert not out
110
+
111
+ msg = "the 'out' parameter is not supported"
112
+ with pytest.raises(ValueError, match=msg):
113
+ np.any(SparseArray(data), out=out)
114
+
115
+ def test_sum(self):
116
+ data = np.arange(10).astype(float)
117
+ out = SparseArray(data).sum()
118
+ assert out == 45.0
119
+
120
+ data[5] = np.nan
121
+ out = SparseArray(data, fill_value=2).sum()
122
+ assert out == 40.0
123
+
124
+ out = SparseArray(data, fill_value=np.nan).sum()
125
+ assert out == 40.0
126
+
127
+ @pytest.mark.parametrize(
128
+ "arr",
129
+ [np.array([0, 1, np.nan, 1]), np.array([0, 1, 1])],
130
+ )
131
+ @pytest.mark.parametrize("fill_value", [0, 1, np.nan])
132
+ @pytest.mark.parametrize("min_count, expected", [(3, 2), (4, np.nan)])
133
+ def test_sum_min_count(self, arr, fill_value, min_count, expected):
134
+ # GH#25777
135
+ sparray = SparseArray(arr, fill_value=fill_value)
136
+ result = sparray.sum(min_count=min_count)
137
+ if np.isnan(expected):
138
+ assert np.isnan(result)
139
+ else:
140
+ assert result == expected
141
+
142
+ def test_bool_sum_min_count(self):
143
+ spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True)
144
+ res = spar_bool.sum(min_count=1)
145
+ assert res == 5
146
+ res = spar_bool.sum(min_count=11)
147
+ assert isna(res)
148
+
149
+ def test_numpy_sum(self):
150
+ data = np.arange(10).astype(float)
151
+ out = np.sum(SparseArray(data))
152
+ assert out == 45.0
153
+
154
+ data[5] = np.nan
155
+ out = np.sum(SparseArray(data, fill_value=2))
156
+ assert out == 40.0
157
+
158
+ out = np.sum(SparseArray(data, fill_value=np.nan))
159
+ assert out == 40.0
160
+
161
+ msg = "the 'dtype' parameter is not supported"
162
+ with pytest.raises(ValueError, match=msg):
163
+ np.sum(SparseArray(data), dtype=np.int64)
164
+
165
+ msg = "the 'out' parameter is not supported"
166
+ with pytest.raises(ValueError, match=msg):
167
+ np.sum(SparseArray(data), out=out)
168
+
169
+ def test_mean(self):
170
+ data = np.arange(10).astype(float)
171
+ out = SparseArray(data).mean()
172
+ assert out == 4.5
173
+
174
+ data[5] = np.nan
175
+ out = SparseArray(data).mean()
176
+ assert out == 40.0 / 9
177
+
178
+ def test_numpy_mean(self):
179
+ data = np.arange(10).astype(float)
180
+ out = np.mean(SparseArray(data))
181
+ assert out == 4.5
182
+
183
+ data[5] = np.nan
184
+ out = np.mean(SparseArray(data))
185
+ assert out == 40.0 / 9
186
+
187
+ msg = "the 'dtype' parameter is not supported"
188
+ with pytest.raises(ValueError, match=msg):
189
+ np.mean(SparseArray(data), dtype=np.int64)
190
+
191
+ msg = "the 'out' parameter is not supported"
192
+ with pytest.raises(ValueError, match=msg):
193
+ np.mean(SparseArray(data), out=out)
194
+
195
+
196
+ class TestMinMax:
197
+ @pytest.mark.parametrize(
198
+ "raw_data,max_expected,min_expected",
199
+ [
200
+ (np.arange(5.0), [4], [0]),
201
+ (-np.arange(5.0), [0], [-4]),
202
+ (np.array([0, 1, 2, np.nan, 4]), [4], [0]),
203
+ (np.array([np.nan] * 5), [np.nan], [np.nan]),
204
+ (np.array([]), [np.nan], [np.nan]),
205
+ ],
206
+ )
207
+ def test_nan_fill_value(self, raw_data, max_expected, min_expected):
208
+ arr = SparseArray(raw_data)
209
+ max_result = arr.max()
210
+ min_result = arr.min()
211
+ assert max_result in max_expected
212
+ assert min_result in min_expected
213
+
214
+ max_result = arr.max(skipna=False)
215
+ min_result = arr.min(skipna=False)
216
+ if np.isnan(raw_data).any():
217
+ assert np.isnan(max_result)
218
+ assert np.isnan(min_result)
219
+ else:
220
+ assert max_result in max_expected
221
+ assert min_result in min_expected
222
+
223
+ @pytest.mark.parametrize(
224
+ "fill_value,max_expected,min_expected",
225
+ [
226
+ (100, 100, 0),
227
+ (-100, 1, -100),
228
+ ],
229
+ )
230
+ def test_fill_value(self, fill_value, max_expected, min_expected):
231
+ arr = SparseArray(
232
+ np.array([fill_value, 0, 1]), dtype=SparseDtype("int", fill_value)
233
+ )
234
+ max_result = arr.max()
235
+ assert max_result == max_expected
236
+
237
+ min_result = arr.min()
238
+ assert min_result == min_expected
239
+
240
+ def test_only_fill_value(self):
241
+ fv = 100
242
+ arr = SparseArray(np.array([fv, fv, fv]), dtype=SparseDtype("int", fv))
243
+ assert len(arr._valid_sp_values) == 0
244
+
245
+ assert arr.max() == fv
246
+ assert arr.min() == fv
247
+ assert arr.max(skipna=False) == fv
248
+ assert arr.min(skipna=False) == fv
249
+
250
+ @pytest.mark.parametrize("func", ["min", "max"])
251
+ @pytest.mark.parametrize("data", [np.array([]), np.array([np.nan, np.nan])])
252
+ @pytest.mark.parametrize(
253
+ "dtype,expected",
254
+ [
255
+ (SparseDtype(np.float64, np.nan), np.nan),
256
+ (SparseDtype(np.float64, 5.0), np.nan),
257
+ (SparseDtype("datetime64[ns]", NaT), NaT),
258
+ (SparseDtype("datetime64[ns]", Timestamp("2018-05-05")), NaT),
259
+ ],
260
+ )
261
+ def test_na_value_if_no_valid_values(self, func, data, dtype, expected):
262
+ arr = SparseArray(data, dtype=dtype)
263
+ result = getattr(arr, func)()
264
+ if expected is NaT:
265
+ # TODO: pin down whether we wrap datetime64("NaT")
266
+ assert result is NaT or np.isnat(result)
267
+ else:
268
+ assert np.isnan(result)
269
+
270
+
271
+ class TestArgmaxArgmin:
272
+ @pytest.mark.parametrize(
273
+ "arr,argmax_expected,argmin_expected",
274
+ [
275
+ (SparseArray([1, 2, 0, 1, 2]), 1, 2),
276
+ (SparseArray([-1, -2, 0, -1, -2]), 2, 1),
277
+ (SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5),
278
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2),
279
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2),
280
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2),
281
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2),
282
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2),
283
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2),
284
+ (SparseArray([0] * 10 + [-1], fill_value=0), 0, 10),
285
+ (SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10),
286
+ (SparseArray([0] * 10 + [-1], fill_value=1), 0, 10),
287
+ (SparseArray([-1] + [0] * 10, fill_value=0), 1, 0),
288
+ (SparseArray([1] + [0] * 10, fill_value=0), 0, 1),
289
+ (SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0),
290
+ (SparseArray([1] + [0] * 10, fill_value=1), 0, 1),
291
+ ],
292
+ )
293
+ def test_argmax_argmin(self, arr, argmax_expected, argmin_expected):
294
+ argmax_result = arr.argmax()
295
+ argmin_result = arr.argmin()
296
+ assert argmax_result == argmax_expected
297
+ assert argmin_result == argmin_expected
298
+
299
+ @pytest.mark.parametrize(
300
+ "arr,method",
301
+ [(SparseArray([]), "argmax"), (SparseArray([]), "argmin")],
302
+ )
303
+ def test_empty_array(self, arr, method):
304
+ msg = f"attempt to get {method} of an empty sequence"
305
+ with pytest.raises(ValueError, match=msg):
306
+ arr.argmax() if method == "argmax" else arr.argmin()
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+ from pandas.core.arrays import SparseArray
9
+
10
+
11
+ @pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")
12
+ @pytest.mark.parametrize("fill_value", [0, np.nan])
13
+ @pytest.mark.parametrize("op", [operator.pos, operator.neg])
14
+ def test_unary_op(op, fill_value):
15
+ arr = np.array([0, 1, np.nan, 2])
16
+ sparray = SparseArray(arr, fill_value=fill_value)
17
+ result = op(sparray)
18
+ expected = SparseArray(op(arr), fill_value=op(fill_value))
19
+ tm.assert_sp_array_equal(result, expected)
20
+
21
+
22
+ @pytest.mark.parametrize("fill_value", [True, False])
23
+ def test_invert(fill_value):
24
+ arr = np.array([True, False, False, True])
25
+ sparray = SparseArray(arr, fill_value=fill_value)
26
+ result = ~sparray
27
+ expected = SparseArray(~arr, fill_value=not fill_value)
28
+ tm.assert_sp_array_equal(result, expected)
29
+
30
+ result = ~pd.Series(sparray)
31
+ expected = pd.Series(expected)
32
+ tm.assert_series_equal(result, expected)
33
+
34
+ result = ~pd.DataFrame({"A": sparray})
35
+ expected = pd.DataFrame({"A": expected})
36
+ tm.assert_frame_equal(result, expected)
37
+
38
+
39
+ class TestUnaryMethods:
40
+ @pytest.mark.filterwarnings(
41
+ "ignore:invalid value encountered in cast:RuntimeWarning"
42
+ )
43
+ def test_neg_operator(self):
44
+ arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8)
45
+ res = -arr
46
+ exp = SparseArray([1, 2, np.nan, -3], fill_value=np.nan, dtype=np.int8)
47
+ tm.assert_sp_array_equal(exp, res)
48
+
49
+ arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8)
50
+ res = -arr
51
+ exp = SparseArray([1, 2, -1, -3], fill_value=1, dtype=np.int8)
52
+ tm.assert_sp_array_equal(exp, res)
53
+
54
+ @pytest.mark.filterwarnings(
55
+ "ignore:invalid value encountered in cast:RuntimeWarning"
56
+ )
57
+ def test_abs_operator(self):
58
+ arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8)
59
+ res = abs(arr)
60
+ exp = SparseArray([1, 2, np.nan, 3], fill_value=np.nan, dtype=np.int8)
61
+ tm.assert_sp_array_equal(exp, res)
62
+
63
+ arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8)
64
+ res = abs(arr)
65
+ exp = SparseArray([1, 2, 1, 3], fill_value=1, dtype=np.int8)
66
+ tm.assert_sp_array_equal(exp, res)
67
+
68
+ def test_invert_operator(self):
69
+ arr = SparseArray([False, True, False, True], fill_value=False, dtype=np.bool_)
70
+ exp = SparseArray(
71
+ np.invert([False, True, False, True]), fill_value=True, dtype=np.bool_
72
+ )
73
+ res = ~arr
74
+ tm.assert_sp_array_equal(exp, res)
75
+
76
+ arr = SparseArray([0, 1, 0, 2, 3, 0], fill_value=0, dtype=np.int32)
77
+ res = ~arr
78
+ exp = SparseArray([-1, -2, -1, -3, -4, -1], fill_value=-1, dtype=np.int32)
79
+ tm.assert_sp_array_equal(exp, res)
llava_next/lib/python3.10/site-packages/pandas/tests/arrays/string_/__init__.py ADDED
File without changes