ZTWHHH committed on
Commit
05b27ed
·
verified ·
1 Parent(s): a55ee99

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/share/terminfo/w/wsiris +0 -0
  2. parrot/share/terminfo/w/wy120-w +0 -0
  3. parrot/share/terminfo/w/wy30-mc +0 -0
  4. parrot/share/terminfo/w/wy325-43w-vb +0 -0
  5. parrot/share/terminfo/w/wy75-mc +0 -0
  6. parrot/share/terminfo/w/wy99gt-25-w +0 -0
  7. parrot/share/terminfo/w/wyse370 +0 -0
  8. parrot/share/terminfo/w/wyse520-p-wvb +0 -0
  9. parrot/share/terminfo/w/wyse75-mc +0 -0
  10. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc +0 -0
  11. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc +0 -0
  12. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc +0 -0
  13. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc +0 -0
  14. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc +0 -0
  15. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc +0 -0
  16. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc +0 -0
  17. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc +0 -0
  18. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc +0 -0
  19. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc +0 -0
  20. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc +0 -0
  21. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc +0 -0
  22. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc +0 -0
  23. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc +0 -0
  24. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc +0 -0
  25. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc +0 -0
  26. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc +0 -0
  27. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc +0 -0
  28. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py +7 -0
  29. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc +0 -0
  30. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc +0 -0
  31. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc +0 -0
  32. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc +0 -0
  33. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc +0 -0
  34. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py +66 -0
  35. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py +473 -0
  36. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py +0 -0
  37. videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py +174 -0
  38. videollama2/lib/python3.10/site-packages/pandas/core/arrays/base.py +2588 -0
  39. videollama2/lib/python3.10/site-packages/pandas/core/arrays/boolean.py +407 -0
  40. videollama2/lib/python3.10/site-packages/pandas/core/arrays/categorical.py +0 -0
  41. videollama2/lib/python3.10/site-packages/pandas/core/arrays/floating.py +173 -0
  42. videollama2/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py +563 -0
  43. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py +19 -0
  44. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  45. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc +0 -0
  46. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc +0 -0
  47. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc +0 -0
  48. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py +414 -0
  49. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py +1929 -0
  50. videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py +207 -0
parrot/share/terminfo/w/wsiris ADDED
Binary file (1.18 kB). View file
 
parrot/share/terminfo/w/wy120-w ADDED
Binary file (1.27 kB). View file
 
parrot/share/terminfo/w/wy30-mc ADDED
Binary file (1.04 kB). View file
 
parrot/share/terminfo/w/wy325-43w-vb ADDED
Binary file (1.24 kB). View file
 
parrot/share/terminfo/w/wy75-mc ADDED
Binary file (1.71 kB). View file
 
parrot/share/terminfo/w/wy99gt-25-w ADDED
Binary file (1.61 kB). View file
 
parrot/share/terminfo/w/wyse370 ADDED
Binary file (2.08 kB). View file
 
parrot/share/terminfo/w/wyse520-p-wvb ADDED
Binary file (1.79 kB). View file
 
parrot/share/terminfo/w/wyse75-mc ADDED
Binary file (1.71 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc ADDED
Binary file (4.83 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc ADDED
Binary file (74.7 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc ADDED
Binary file (81.1 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc ADDED
Binary file (63.3 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc ADDED
Binary file (70.6 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc ADDED
Binary file (4.68 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc ADDED
Binary file (6.97 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc ADDED
Binary file (47.6 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc ADDED
Binary file (41.4 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc ADDED
Binary file (7.38 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc ADDED
Binary file (32.7 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (30.3 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from pandas.core.arrays.arrow.accessors import (
2
+ ListAccessor,
3
+ StructAccessor,
4
+ )
5
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
6
+
7
+ __all__ = ["ArrowExtensionArray", "StructAccessor", "ListAccessor"]
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (387 Bytes). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc ADDED
Binary file (2.05 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc ADDED
Binary file (82.6 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc ADDED
Binary file (6.28 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import warnings
4
+
5
+ import numpy as np
6
+ import pyarrow
7
+
8
+ from pandas.errors import PerformanceWarning
9
+ from pandas.util._exceptions import find_stack_level
10
+
11
+
12
+ def fallback_performancewarning(version: str | None = None) -> None:
13
+ """
14
+ Raise a PerformanceWarning for falling back to ExtensionArray's
15
+ non-pyarrow method
16
+ """
17
+ msg = "Falling back on a non-pyarrow code path which may decrease performance."
18
+ if version is not None:
19
+ msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning."
20
+ warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
21
+
22
+
23
def pyarrow_array_to_numpy_and_mask(
    arr, dtype: np.dtype
) -> tuple[np.ndarray, np.ndarray]:
    """
    Convert a primitive pyarrow.Array to a numpy array and boolean mask based
    on the buffers of the Array.

    At the moment pyarrow.BooleanArray is not supported.

    Parameters
    ----------
    arr : pyarrow.Array
    dtype : numpy.dtype

    Returns
    -------
    (data, mask)
        Tuple of two numpy arrays with the raw data (with specified dtype) and
        a boolean mask (validity mask, so False means missing)
    """
    dtype = np.dtype(dtype)

    if pyarrow.types.is_null(arr.type):
        # Everything is null, so the data buffer can stay uninitialized;
        # an all-False mask marks every entry as missing.
        return np.empty(len(arr), dtype=dtype), np.zeros(len(arr), dtype=bool)

    buffers = arr.buffers()
    # Arrow buffers may carry padding and the array may be offset into its
    # buffer, so slice before viewing as numpy.
    # See also https://github.com/pandas-dev/pandas/issues/40896
    start = arr.offset * dtype.itemsize
    stop = start + len(arr) * dtype.itemsize
    data = np.frombuffer(buffers[1][start:stop], dtype=dtype)

    validity_bitmap = buffers[0]
    if validity_bitmap is None:
        # No validity buffer means no nulls: everything is valid.
        mask = np.ones(len(arr), dtype=bool)
    else:
        # Expand the packed validity bitmap to one bool per element via a
        # temporary pyarrow BooleanArray, honouring the array's offset.
        valid = pyarrow.BooleanArray.from_buffers(
            pyarrow.bool_(), len(arr), [None, validity_bitmap], offset=arr.offset
        )
        mask = np.asarray(valid)
    return data, mask
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Accessors for arrow-backed data."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from abc import (
6
+ ABCMeta,
7
+ abstractmethod,
8
+ )
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ cast,
12
+ )
13
+
14
+ from pandas.compat import (
15
+ pa_version_under10p1,
16
+ pa_version_under11p0,
17
+ )
18
+
19
+ from pandas.core.dtypes.common import is_list_like
20
+
21
+ if not pa_version_under10p1:
22
+ import pyarrow as pa
23
+ import pyarrow.compute as pc
24
+
25
+ from pandas.core.dtypes.dtypes import ArrowDtype
26
+
27
+ if TYPE_CHECKING:
28
+ from collections.abc import Iterator
29
+
30
+ from pandas import (
31
+ DataFrame,
32
+ Series,
33
+ )
34
+
35
+
36
class ArrowAccessor(metaclass=ABCMeta):
    """Base class for accessors over Arrow-backed Series values."""

    @abstractmethod
    def __init__(self, data, validation_msg: str) -> None:
        self._data = data
        self._validation_msg = validation_msg
        self._validate(data)

    @abstractmethod
    def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
        """Return True if the accessor supports this pyarrow dtype."""

    def _validate(self, data):
        # Raise AttributeError (not TypeError) so that hasattr()/inspect
        # degrade gracefully on Series of an unsupported dtype.  The
        # short-circuit ``or`` keeps ``dtype.pyarrow_dtype`` from being
        # touched on non-Arrow dtypes.
        dtype = data.dtype
        if not isinstance(dtype, ArrowDtype) or not self._is_valid_pyarrow_dtype(
            dtype.pyarrow_dtype
        ):
            raise AttributeError(self._validation_msg.format(dtype=dtype))

    @property
    def _pa_array(self):
        # Underlying pyarrow ChunkedArray backing the Series.
        return self._data.array._pa_array
60
+
61
+
62
class ListAccessor(ArrowAccessor):
    """
    Accessor object for list data properties of the Series values.

    Parameters
    ----------
    data : Series
        Series containing Arrow list data.
    """

    def __init__(self, data=None) -> None:
        super().__init__(
            data,
            validation_msg="Can only use the '.list' accessor with "
            "'list[pyarrow]' dtype, not {dtype}.",
        )

    def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
        # list, fixed-size list and large list all support this accessor.
        return any(
            check(pyarrow_dtype)
            for check in (
                pa.types.is_list,
                pa.types.is_fixed_size_list,
                pa.types.is_large_list,
            )
        )

    def len(self) -> Series:
        """
        Return the length of each list in the Series.

        Returns
        -------
        pandas.Series
            The length of each list, with pyarrow-backed integer dtype.
        """
        from pandas import Series

        lengths = pc.list_value_length(self._pa_array)
        return Series(lengths, dtype=ArrowDtype(lengths.type))

    def __getitem__(self, key: int | slice) -> Series:
        """
        Index or slice lists in the Series.

        Parameters
        ----------
        key : int | slice
            Index or slice of indices to access from each list.

        Returns
        -------
        pandas.Series
            The element (or sub-list) at the requested position of each list.

        Raises
        ------
        NotImplementedError
            If ``key`` is a slice and pyarrow is older than 11.0.
        ValueError
            If ``key`` is neither an int nor a slice.
        """
        from pandas import Series

        if isinstance(key, int):
            # TODO: Support negative key but pyarrow does not allow
            # element index to be an array.
            # if key < 0:
            #     key = pc.add(key, pc.list_value_length(self._pa_array))
            element = pc.list_element(self._pa_array, key)
            return Series(element, dtype=ArrowDtype(element.type))

        if isinstance(key, slice):
            if pa_version_under11p0:
                raise NotImplementedError(
                    f"List slice not supported by pyarrow {pa.__version__}."
                )
            # TODO: Support negative start/stop/step, ideally this would be
            # added upstream in pyarrow.  When negative step support lands,
            # a None start should be set to the last element of the array.
            start = 0 if key.start is None else key.start
            step = 1 if key.step is None else key.step
            sliced = pc.list_slice(self._pa_array, start, key.stop, step)
            return Series(sliced, dtype=ArrowDtype(sliced.type))

        raise ValueError(f"key must be an int or slice, got {type(key).__name__}")

    def __iter__(self) -> Iterator:
        # Element-wise iteration over list values is deliberately unsupported.
        raise TypeError(f"'{type(self).__name__}' object is not iterable")

    def flatten(self) -> Series:
        """
        Flatten list values.

        Returns
        -------
        pandas.Series
            The data from all lists in the series concatenated into one
            flat Series.
        """
        from pandas import Series

        flat = pc.list_flatten(self._pa_array)
        return Series(flat, dtype=ArrowDtype(flat.type))
213
+
214
+
215
class StructAccessor(ArrowAccessor):
    """
    Accessor object for structured data properties of the Series values.

    Parameters
    ----------
    data : Series
        Series containing Arrow struct data.
    """

    def __init__(self, data=None) -> None:
        super().__init__(
            data,
            validation_msg=(
                "Can only use the '.struct' accessor with 'struct[pyarrow]' "
                "dtype, not {dtype}."
            ),
        )

    def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
        # Only struct-typed arrays support this accessor.
        return pa.types.is_struct(pyarrow_dtype)

    @property
    def dtypes(self) -> Series:
        """
        Return the dtype object of each child field of the struct.

        Returns
        -------
        pandas.Series
            The ArrowDtype of each child field, indexed by field name.
        """
        from pandas import (
            Index,
            Series,
        )

        struct_type = self._data.dtype.pyarrow_dtype
        fields = list(struct_type)
        return Series(
            [ArrowDtype(field.type) for field in fields],
            index=Index([field.name for field in fields]),
        )

    def field(
        self,
        name_or_index: list[str]
        | list[bytes]
        | list[int]
        | pc.Expression
        | bytes
        | str
        | int,
    ) -> Series:
        """
        Extract a child field of a struct as a Series.

        Parameters
        ----------
        name_or_index : str | bytes | int | expression | list
            Name or index of the child field to extract.  A list-like input
            indexes into a nested struct, one level per element.

        Returns
        -------
        pandas.Series
            The data corresponding to the selected child field.  The Series
            name is the selected field's name; for a
            :class:`pyarrow.compute.Expression` it is the expression's
            string form, and for a list-like selector it is the name of
            the final field selected.

        Raises
        ------
        ValueError
            If ``name_or_index`` is not one of the supported selector types.

        See Also
        --------
        Series.struct.explode : Return all child fields as a DataFrame.
        """
        from pandas import Series

        def resolve_name(
            selector: list[str]
            | list[bytes]
            | list[int]
            | pc.Expression
            | bytes
            | str
            | int,
            data: pa.ChunkedArray,
        ):
            # Map a selector to the resulting Series name; for nested
            # (list-like) selectors the last field's name wins.
            if isinstance(selector, int):
                return data.type.field(selector).name
            if isinstance(selector, (str, bytes)):
                return selector
            if isinstance(selector, pc.Expression):
                return str(selector)
            if is_list_like(selector):
                # Walk level by level into the nested struct, resolving
                # each element to a field name along the way.
                remaining = list(reversed(selector))
                selected = data
                while remaining:
                    # cast keeps mypy happy: only lists reach this branch.
                    remaining = cast(list, remaining)
                    level_name = resolve_name(remaining.pop(), selected)
                    selected = selected.type.field(
                        selected.type.get_field_index(level_name)
                    )
                return selected.name
            raise ValueError(
                "name_or_index must be an int, str, bytes, "
                "pyarrow.compute.Expression, or list of those"
            )

        pa_arr = self._data.array._pa_array
        name = resolve_name(name_or_index, pa_arr)
        field_arr = pc.struct_field(pa_arr, name_or_index)

        return Series(
            field_arr,
            dtype=ArrowDtype(field_arr.type),
            index=self._data.index,
            name=name,
        )

    def explode(self) -> DataFrame:
        """
        Extract all child fields of a struct as a DataFrame.

        Returns
        -------
        pandas.DataFrame
            One column per child field of the struct, in field order.

        See Also
        --------
        Series.struct.field : Return a single child field as a Series.
        """
        from pandas import concat

        num_fields = self._pa_array.type.num_fields
        return concat([self.field(i) for i in range(num_fields)], axis="columns")
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py ADDED
The diff for this file is too large to render. See raw diff
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from typing import TYPE_CHECKING
5
+
6
+ import pyarrow
7
+
8
+ from pandas.compat import pa_version_under14p1
9
+
10
+ from pandas.core.dtypes.dtypes import (
11
+ IntervalDtype,
12
+ PeriodDtype,
13
+ )
14
+
15
+ from pandas.core.arrays.interval import VALID_CLOSED
16
+
17
+ if TYPE_CHECKING:
18
+ from pandas._typing import IntervalClosedType
19
+
20
+
21
class ArrowPeriodType(pyarrow.ExtensionType):
    """Arrow extension type storing pandas Period data as int64 ordinals."""

    def __init__(self, freq) -> None:
        # Attributes must be set before calling super().__init__, because
        # the pyarrow base constructor serializes the type.
        self._freq = freq
        pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period")

    @property
    def freq(self):
        # Frequency of the period data (e.g. "D").
        return self._freq

    def __arrow_ext_serialize__(self) -> bytes:
        return json.dumps({"freq": self.freq}).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType:
        metadata = json.loads(serialized.decode())
        return ArrowPeriodType(metadata["freq"])

    def __eq__(self, other):
        if not isinstance(other, pyarrow.BaseExtensionType):
            return NotImplemented
        # Exact type match on purpose: subclasses are distinct types.
        return type(self) == type(other) and self.freq == other.freq

    def __ne__(self, other) -> bool:
        return not self == other

    def __hash__(self) -> int:
        return hash((str(self), self.freq))

    def to_pandas_dtype(self) -> PeriodDtype:
        return PeriodDtype(freq=self.freq)


# Register with pyarrow using a dummy instance so serialized data round-trips.
_period_type = ArrowPeriodType("D")
pyarrow.register_extension_type(_period_type)
60
+
61
+
62
class ArrowIntervalType(pyarrow.ExtensionType):
    """Arrow extension type storing pandas Interval data as (left, right) structs."""

    def __init__(self, subtype, closed: IntervalClosedType) -> None:
        # Attributes must be set before calling super().__init__, because
        # the pyarrow base constructor serializes the type.
        assert closed in VALID_CLOSED
        self._closed: IntervalClosedType = closed
        if not isinstance(subtype, pyarrow.DataType):
            # Allow string aliases such as "int64" as the subtype.
            subtype = pyarrow.type_for_alias(str(subtype))
        self._subtype = subtype

        storage_type = pyarrow.struct([("left", subtype), ("right", subtype)])
        pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval")

    @property
    def subtype(self):
        # pyarrow.DataType of the interval endpoints.
        return self._subtype

    @property
    def closed(self) -> IntervalClosedType:
        # Which side(s) of the interval are closed.
        return self._closed

    def __arrow_ext_serialize__(self) -> bytes:
        return json.dumps(
            {"subtype": str(self.subtype), "closed": self.closed}
        ).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType:
        metadata = json.loads(serialized.decode())
        return ArrowIntervalType(
            pyarrow.type_for_alias(metadata["subtype"]), metadata["closed"]
        )

    def __eq__(self, other):
        if not isinstance(other, pyarrow.BaseExtensionType):
            return NotImplemented
        # Exact type match on purpose: subclasses are distinct types.
        return (
            type(self) == type(other)
            and self.subtype == other.subtype
            and self.closed == other.closed
        )

    def __ne__(self, other) -> bool:
        return not self == other

    def __hash__(self) -> int:
        return hash((str(self), str(self.subtype), self.closed))

    def to_pandas_dtype(self) -> IntervalDtype:
        return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed)


# Register with pyarrow using a dummy instance so serialized data round-trips.
_interval_type = ArrowIntervalType(pyarrow.int64(), "left")
pyarrow.register_extension_type(_interval_type)
117
+
118
+
119
# Message raised when refusing to unpickle the unsafe
# 'arrow.py_extension_type' metadata (see patch_pyarrow below); the
# placeholders are filled by ForbiddenExtensionType.__arrow_ext_deserialize__.
_ERROR_MSG = """\
Disallowed deserialization of 'arrow.py_extension_type':
storage_type = {storage_type}
serialized = {serialized}
pickle disassembly:\n{pickle_disassembly}

Reading of untrusted Parquet or Feather files with a PyExtensionType column
allows arbitrary code execution.
If you trust this file, you can enable reading the extension type by one of:

- upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`
- install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running
  `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`

We strongly recommend updating your Parquet/Feather files to use extension types
derived from `pyarrow.ExtensionType` instead, and register this type explicitly.
"""
136
+
137
+
138
def patch_pyarrow():
    """
    Disable unsafe deserialization of 'arrow.py_extension_type' on old pyarrow.

    Deserializing that extension type unpickles attacker-controlled bytes,
    which allows arbitrary code execution when reading untrusted
    Parquet/Feather files.  This is a no-op when pyarrow >= 14.0.1 (which
    ships its own protection) or when the pyarrow-hotfix package already
    installed the equivalent fix.
    """
    # starting from pyarrow 14.0.1, it has its own mechanism
    if not pa_version_under14p1:
        return

    # if https://github.com/pitrou/pyarrow-hotfix was installed and enabled
    if getattr(pyarrow, "_hotfix_installed", False):
        return

    class ForbiddenExtensionType(pyarrow.ExtensionType):
        def __arrow_ext_serialize__(self):
            return b""

        @classmethod
        def __arrow_ext_deserialize__(cls, storage_type, serialized):
            # Show the would-be pickle payload in the error for diagnosis,
            # but never execute it.
            import io
            import pickletools

            disassembly = io.StringIO()
            pickletools.dis(serialized, disassembly)
            raise RuntimeError(
                _ERROR_MSG.format(
                    storage_type=storage_type,
                    serialized=serialized,
                    pickle_disassembly=disassembly.getvalue(),
                )
            )

    # Replace the builtin registration with one that refuses to deserialize.
    pyarrow.unregister_extension_type("arrow.py_extension_type")
    pyarrow.register_extension_type(
        ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type")
    )

    pyarrow._hotfix_installed = True


patch_pyarrow()
videollama2/lib/python3.10/site-packages/pandas/core/arrays/base.py ADDED
@@ -0,0 +1,2588 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ An interface for extending pandas with custom arrays.
3
+
4
+ .. warning::
5
+
6
+ This is an experimental API and subject to breaking changes
7
+ without warning.
8
+ """
9
+ from __future__ import annotations
10
+
11
+ import operator
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Any,
15
+ Callable,
16
+ ClassVar,
17
+ Literal,
18
+ cast,
19
+ overload,
20
+ )
21
+ import warnings
22
+
23
+ import numpy as np
24
+
25
+ from pandas._libs import (
26
+ algos as libalgos,
27
+ lib,
28
+ )
29
+ from pandas.compat import set_function_name
30
+ from pandas.compat.numpy import function as nv
31
+ from pandas.errors import AbstractMethodError
32
+ from pandas.util._decorators import (
33
+ Appender,
34
+ Substitution,
35
+ cache_readonly,
36
+ )
37
+ from pandas.util._exceptions import find_stack_level
38
+ from pandas.util._validators import (
39
+ validate_bool_kwarg,
40
+ validate_fillna_kwargs,
41
+ validate_insert_loc,
42
+ )
43
+
44
+ from pandas.core.dtypes.cast import maybe_cast_pointwise_result
45
+ from pandas.core.dtypes.common import (
46
+ is_list_like,
47
+ is_scalar,
48
+ pandas_dtype,
49
+ )
50
+ from pandas.core.dtypes.dtypes import ExtensionDtype
51
+ from pandas.core.dtypes.generic import (
52
+ ABCDataFrame,
53
+ ABCIndex,
54
+ ABCSeries,
55
+ )
56
+ from pandas.core.dtypes.missing import isna
57
+
58
+ from pandas.core import (
59
+ arraylike,
60
+ missing,
61
+ roperator,
62
+ )
63
+ from pandas.core.algorithms import (
64
+ duplicated,
65
+ factorize_array,
66
+ isin,
67
+ map_array,
68
+ mode,
69
+ rank,
70
+ unique,
71
+ )
72
+ from pandas.core.array_algos.quantile import quantile_with_mask
73
+ from pandas.core.missing import _fill_limit_area_1d
74
+ from pandas.core.sorting import (
75
+ nargminmax,
76
+ nargsort,
77
+ )
78
+
79
+ if TYPE_CHECKING:
80
+ from collections.abc import (
81
+ Iterator,
82
+ Sequence,
83
+ )
84
+
85
+ from pandas._typing import (
86
+ ArrayLike,
87
+ AstypeArg,
88
+ AxisInt,
89
+ Dtype,
90
+ DtypeObj,
91
+ FillnaOptions,
92
+ InterpolateOptions,
93
+ NumpySorter,
94
+ NumpyValueArrayLike,
95
+ PositionalIndexer,
96
+ ScalarIndexer,
97
+ Self,
98
+ SequenceIndexer,
99
+ Shape,
100
+ SortKind,
101
+ TakeIndexer,
102
+ npt,
103
+ )
104
+
105
+ from pandas import Index
106
+
107
+ _extension_array_shared_docs: dict[str, str] = {}
108
+
109
+
110
+ class ExtensionArray:
111
+ """
112
+ Abstract base class for custom 1-D array types.
113
+
114
+ pandas will recognize instances of this class as proper arrays
115
+ with a custom type and will not attempt to coerce them to objects. They
116
+ may be stored directly inside a :class:`DataFrame` or :class:`Series`.
117
+
118
+ Attributes
119
+ ----------
120
+ dtype
121
+ nbytes
122
+ ndim
123
+ shape
124
+
125
+ Methods
126
+ -------
127
+ argsort
128
+ astype
129
+ copy
130
+ dropna
131
+ duplicated
132
+ factorize
133
+ fillna
134
+ equals
135
+ insert
136
+ interpolate
137
+ isin
138
+ isna
139
+ ravel
140
+ repeat
141
+ searchsorted
142
+ shift
143
+ take
144
+ tolist
145
+ unique
146
+ view
147
+ _accumulate
148
+ _concat_same_type
149
+ _explode
150
+ _formatter
151
+ _from_factorized
152
+ _from_sequence
153
+ _from_sequence_of_strings
154
+ _hash_pandas_object
155
+ _pad_or_backfill
156
+ _reduce
157
+ _values_for_argsort
158
+ _values_for_factorize
159
+
160
+ Notes
161
+ -----
162
+ The interface includes the following abstract methods that must be
163
+ implemented by subclasses:
164
+
165
+ * _from_sequence
166
+ * _from_factorized
167
+ * __getitem__
168
+ * __len__
169
+ * __eq__
170
+ * dtype
171
+ * nbytes
172
+ * isna
173
+ * take
174
+ * copy
175
+ * _concat_same_type
176
+ * interpolate
177
+
178
+ A default repr displaying the type, (truncated) data, length,
179
+ and dtype is provided. It can be customized or replaced by
180
+ by overriding:
181
+
182
+ * __repr__ : A default repr for the ExtensionArray.
183
+ * _formatter : Print scalars inside a Series or DataFrame.
184
+
185
+ Some methods require casting the ExtensionArray to an ndarray of Python
186
+ objects with ``self.astype(object)``, which may be expensive. When
187
+ performance is a concern, we highly recommend overriding the following
188
+ methods:
189
+
190
+ * fillna
191
+ * _pad_or_backfill
192
+ * dropna
193
+ * unique
194
+ * factorize / _values_for_factorize
195
+ * argsort, argmax, argmin / _values_for_argsort
196
+ * searchsorted
197
+ * map
198
+
199
+ The remaining methods implemented on this class should be performant,
200
+ as they only compose abstract methods. Still, a more efficient
201
+ implementation may be available, and these methods can be overridden.
202
+
203
+ One can implement methods to handle array accumulations or reductions.
204
+
205
+ * _accumulate
206
+ * _reduce
207
+
208
+ One can implement methods to handle parsing from strings that will be used
209
+ in methods such as ``pandas.io.parsers.read_csv``.
210
+
211
+ * _from_sequence_of_strings
212
+
213
+ This class does not inherit from 'abc.ABCMeta' for performance reasons.
214
+ Methods and properties required by the interface raise
215
+ ``pandas.errors.AbstractMethodError`` and no ``register`` method is
216
+ provided for registering virtual subclasses.
217
+
218
+ ExtensionArrays are limited to 1 dimension.
219
+
220
+ They may be backed by none, one, or many NumPy arrays. For example,
221
+ ``pandas.Categorical`` is an extension array backed by two arrays,
222
+ one for codes and one for categories. An array of IPv6 address may
223
+ be backed by a NumPy structured array with two fields, one for the
224
+ lower 64 bits and one for the upper 64 bits. Or they may be backed
225
+ by some other storage type, like Python lists. Pandas makes no
226
+ assumptions on how the data are stored, just that it can be converted
227
+ to a NumPy array.
228
+ The ExtensionArray interface does not impose any rules on how this data
229
+ is stored. However, currently, the backing data cannot be stored in
230
+ attributes called ``.values`` or ``._values`` to ensure full compatibility
231
+ with pandas internals. But other names as ``.data``, ``._data``,
232
+ ``._items``, ... can be freely used.
233
+
234
+ If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
235
+ that
236
+
237
+ 1. You defer by returning ``NotImplemented`` when any Series are present
238
+ in `inputs`. Pandas will extract the arrays and call the ufunc again.
239
+ 2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
240
+ Pandas inspect this to determine whether the ufunc is valid for the
241
+ types present.
242
+
243
+ See :ref:`extending.extension.ufunc` for more.
244
+
245
+ By default, ExtensionArrays are not hashable. Immutable subclasses may
246
+ override this behavior.
247
+
248
+ Examples
249
+ --------
250
+ Please see the following:
251
+
252
+ https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/list/array.py
253
+ """
254
+
255
+ # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
256
+ # Don't override this.
257
+ _typ = "extension"
258
+
259
+ # similar to __array_priority__, positions ExtensionArray after Index,
260
+ # Series, and DataFrame. EA subclasses may override to choose which EA
261
+ # subclass takes priority. If overriding, the value should always be
262
+ # strictly less than 2000 to be below Index.__pandas_priority__.
263
+ __pandas_priority__ = 1000
264
+
265
+ # ------------------------------------------------------------------------
266
+ # Constructors
267
+ # ------------------------------------------------------------------------
268
+
269
+ @classmethod
270
+ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
271
+ """
272
+ Construct a new ExtensionArray from a sequence of scalars.
273
+
274
+ Parameters
275
+ ----------
276
+ scalars : Sequence
277
+ Each element will be an instance of the scalar type for this
278
+ array, ``cls.dtype.type`` or be converted into this type in this method.
279
+ dtype : dtype, optional
280
+ Construct for this particular dtype. This should be a Dtype
281
+ compatible with the ExtensionArray.
282
+ copy : bool, default False
283
+ If True, copy the underlying data.
284
+
285
+ Returns
286
+ -------
287
+ ExtensionArray
288
+
289
+ Examples
290
+ --------
291
+ >>> pd.arrays.IntegerArray._from_sequence([4, 5])
292
+ <IntegerArray>
293
+ [4, 5]
294
+ Length: 2, dtype: Int64
295
+ """
296
+ raise AbstractMethodError(cls)
297
+
298
+ @classmethod
299
+ def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self:
300
+ """
301
+ Strict analogue to _from_sequence, allowing only sequences of scalars
302
+ that should be specifically inferred to the given dtype.
303
+
304
+ Parameters
305
+ ----------
306
+ scalars : sequence
307
+ dtype : ExtensionDtype
308
+
309
+ Raises
310
+ ------
311
+ TypeError or ValueError
312
+
313
+ Notes
314
+ -----
315
+ This is called in a try/except block when casting the result of a
316
+ pointwise operation.
317
+ """
318
+ try:
319
+ return cls._from_sequence(scalars, dtype=dtype, copy=False)
320
+ except (ValueError, TypeError):
321
+ raise
322
+ except Exception:
323
+ warnings.warn(
324
+ "_from_scalars should only raise ValueError or TypeError. "
325
+ "Consider overriding _from_scalars where appropriate.",
326
+ stacklevel=find_stack_level(),
327
+ )
328
+ raise
329
+
330
+ @classmethod
331
+ def _from_sequence_of_strings(
332
+ cls, strings, *, dtype: Dtype | None = None, copy: bool = False
333
+ ):
334
+ """
335
+ Construct a new ExtensionArray from a sequence of strings.
336
+
337
+ Parameters
338
+ ----------
339
+ strings : Sequence
340
+ Each element will be an instance of the scalar type for this
341
+ array, ``cls.dtype.type``.
342
+ dtype : dtype, optional
343
+ Construct for this particular dtype. This should be a Dtype
344
+ compatible with the ExtensionArray.
345
+ copy : bool, default False
346
+ If True, copy the underlying data.
347
+
348
+ Returns
349
+ -------
350
+ ExtensionArray
351
+
352
+ Examples
353
+ --------
354
+ >>> pd.arrays.IntegerArray._from_sequence_of_strings(["1", "2", "3"])
355
+ <IntegerArray>
356
+ [1, 2, 3]
357
+ Length: 3, dtype: Int64
358
+ """
359
+ raise AbstractMethodError(cls)
360
+
361
+ @classmethod
362
+ def _from_factorized(cls, values, original):
363
+ """
364
+ Reconstruct an ExtensionArray after factorization.
365
+
366
+ Parameters
367
+ ----------
368
+ values : ndarray
369
+ An integer ndarray with the factorized values.
370
+ original : ExtensionArray
371
+ The original ExtensionArray that factorize was called on.
372
+
373
+ See Also
374
+ --------
375
+ factorize : Top-level factorize method that dispatches here.
376
+ ExtensionArray.factorize : Encode the extension array as an enumerated type.
377
+
378
+ Examples
379
+ --------
380
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
381
+ ... pd.Interval(1, 5), pd.Interval(1, 5)])
382
+ >>> codes, uniques = pd.factorize(interv_arr)
383
+ >>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr)
384
+ <IntervalArray>
385
+ [(0, 1], (1, 5]]
386
+ Length: 2, dtype: interval[int64, right]
387
+ """
388
+ raise AbstractMethodError(cls)
389
+
390
+ # ------------------------------------------------------------------------
391
+ # Must be a Sequence
392
+ # ------------------------------------------------------------------------
393
+ @overload
394
+ def __getitem__(self, item: ScalarIndexer) -> Any:
395
+ ...
396
+
397
+ @overload
398
+ def __getitem__(self, item: SequenceIndexer) -> Self:
399
+ ...
400
+
401
+ def __getitem__(self, item: PositionalIndexer) -> Self | Any:
402
+ """
403
+ Select a subset of self.
404
+
405
+ Parameters
406
+ ----------
407
+ item : int, slice, or ndarray
408
+ * int: The position in 'self' to get.
409
+
410
+ * slice: A slice object, where 'start', 'stop', and 'step' are
411
+ integers or None
412
+
413
+ * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
414
+
415
+ * list[int]: A list of int
416
+
417
+ Returns
418
+ -------
419
+ item : scalar or ExtensionArray
420
+
421
+ Notes
422
+ -----
423
+ For scalar ``item``, return a scalar value suitable for the array's
424
+ type. This should be an instance of ``self.dtype.type``.
425
+
426
+ For slice ``key``, return an instance of ``ExtensionArray``, even
427
+ if the slice is length 0 or 1.
428
+
429
+ For a boolean mask, return an instance of ``ExtensionArray``, filtered
430
+ to the values where ``item`` is True.
431
+ """
432
+ raise AbstractMethodError(self)
433
+
434
+ def __setitem__(self, key, value) -> None:
435
+ """
436
+ Set one or more values inplace.
437
+
438
+ This method is not required to satisfy the pandas extension array
439
+ interface.
440
+
441
+ Parameters
442
+ ----------
443
+ key : int, ndarray, or slice
444
+ When called from, e.g. ``Series.__setitem__``, ``key`` will be
445
+ one of
446
+
447
+ * scalar int
448
+ * ndarray of integers.
449
+ * boolean ndarray
450
+ * slice object
451
+
452
+ value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
453
+ value or values to be set of ``key``.
454
+
455
+ Returns
456
+ -------
457
+ None
458
+ """
459
+ # Some notes to the ExtensionArray implementer who may have ended up
460
+ # here. While this method is not required for the interface, if you
461
+ # *do* choose to implement __setitem__, then some semantics should be
462
+ # observed:
463
+ #
464
+ # * Setting multiple values : ExtensionArrays should support setting
465
+ # multiple values at once, 'key' will be a sequence of integers and
466
+ # 'value' will be a same-length sequence.
467
+ #
468
+ # * Broadcasting : For a sequence 'key' and a scalar 'value',
469
+ # each position in 'key' should be set to 'value'.
470
+ #
471
+ # * Coercion : Most users will expect basic coercion to work. For
472
+ # example, a string like '2018-01-01' is coerced to a datetime
473
+ # when setting on a datetime64ns array. In general, if the
474
+ # __init__ method coerces that value, then so should __setitem__
475
+ # Note, also, that Series/DataFrame.where internally use __setitem__
476
+ # on a copy of the data.
477
+ raise NotImplementedError(f"{type(self)} does not implement __setitem__.")
478
+
479
+ def __len__(self) -> int:
480
+ """
481
+ Length of this array
482
+
483
+ Returns
484
+ -------
485
+ length : int
486
+ """
487
+ raise AbstractMethodError(self)
488
+
489
+ def __iter__(self) -> Iterator[Any]:
490
+ """
491
+ Iterate over elements of the array.
492
+ """
493
+ # This needs to be implemented so that pandas recognizes extension
494
+ # arrays as list-like. The default implementation makes successive
495
+ # calls to ``__getitem__``, which may be slower than necessary.
496
+ for i in range(len(self)):
497
+ yield self[i]
498
+
499
+ def __contains__(self, item: object) -> bool | np.bool_:
500
+ """
501
+ Return for `item in self`.
502
+ """
503
+ # GH37867
504
+ # comparisons of any item to pd.NA always return pd.NA, so e.g. "a" in [pd.NA]
505
+ # would raise a TypeError. The implementation below works around that.
506
+ if is_scalar(item) and isna(item):
507
+ if not self._can_hold_na:
508
+ return False
509
+ elif item is self.dtype.na_value or isinstance(item, self.dtype.type):
510
+ return self._hasna
511
+ else:
512
+ return False
513
+ else:
514
+ # error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
515
+ # attribute "any"
516
+ return (item == self).any() # type: ignore[union-attr]
517
+
518
+ # error: Signature of "__eq__" incompatible with supertype "object"
519
+ def __eq__(self, other: object) -> ArrayLike: # type: ignore[override]
520
+ """
521
+ Return for `self == other` (element-wise equality).
522
+ """
523
+ # Implementer note: this should return a boolean numpy ndarray or
524
+ # a boolean ExtensionArray.
525
+ # When `other` is one of Series, Index, or DataFrame, this method should
526
+ # return NotImplemented (to ensure that those objects are responsible for
527
+ # first unpacking the arrays, and then dispatch the operation to the
528
+ # underlying arrays)
529
+ raise AbstractMethodError(self)
530
+
531
+ # error: Signature of "__ne__" incompatible with supertype "object"
532
+ def __ne__(self, other: object) -> ArrayLike: # type: ignore[override]
533
+ """
534
+ Return for `self != other` (element-wise in-equality).
535
+ """
536
+ # error: Unsupported operand type for ~ ("ExtensionArray")
537
+ return ~(self == other) # type: ignore[operator]
538
+
539
+ def to_numpy(
540
+ self,
541
+ dtype: npt.DTypeLike | None = None,
542
+ copy: bool = False,
543
+ na_value: object = lib.no_default,
544
+ ) -> np.ndarray:
545
+ """
546
+ Convert to a NumPy ndarray.
547
+
548
+ This is similar to :meth:`numpy.asarray`, but may provide additional control
549
+ over how the conversion is done.
550
+
551
+ Parameters
552
+ ----------
553
+ dtype : str or numpy.dtype, optional
554
+ The dtype to pass to :meth:`numpy.asarray`.
555
+ copy : bool, default False
556
+ Whether to ensure that the returned value is a not a view on
557
+ another array. Note that ``copy=False`` does not *ensure* that
558
+ ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
559
+ a copy is made, even if not strictly necessary.
560
+ na_value : Any, optional
561
+ The value to use for missing values. The default value depends
562
+ on `dtype` and the type of the array.
563
+
564
+ Returns
565
+ -------
566
+ numpy.ndarray
567
+ """
568
+ result = np.asarray(self, dtype=dtype)
569
+ if copy or na_value is not lib.no_default:
570
+ result = result.copy()
571
+ if na_value is not lib.no_default:
572
+ result[self.isna()] = na_value
573
+ return result
574
+
575
+ # ------------------------------------------------------------------------
576
+ # Required attributes
577
+ # ------------------------------------------------------------------------
578
+
579
+ @property
580
+ def dtype(self) -> ExtensionDtype:
581
+ """
582
+ An instance of ExtensionDtype.
583
+
584
+ Examples
585
+ --------
586
+ >>> pd.array([1, 2, 3]).dtype
587
+ Int64Dtype()
588
+ """
589
+ raise AbstractMethodError(self)
590
+
591
+ @property
592
+ def shape(self) -> Shape:
593
+ """
594
+ Return a tuple of the array dimensions.
595
+
596
+ Examples
597
+ --------
598
+ >>> arr = pd.array([1, 2, 3])
599
+ >>> arr.shape
600
+ (3,)
601
+ """
602
+ return (len(self),)
603
+
604
+ @property
605
+ def size(self) -> int:
606
+ """
607
+ The number of elements in the array.
608
+ """
609
+ # error: Incompatible return value type (got "signedinteger[_64Bit]",
610
+ # expected "int") [return-value]
611
+ return np.prod(self.shape) # type: ignore[return-value]
612
+
613
+ @property
614
+ def ndim(self) -> int:
615
+ """
616
+ Extension Arrays are only allowed to be 1-dimensional.
617
+
618
+ Examples
619
+ --------
620
+ >>> arr = pd.array([1, 2, 3])
621
+ >>> arr.ndim
622
+ 1
623
+ """
624
+ return 1
625
+
626
+ @property
627
+ def nbytes(self) -> int:
628
+ """
629
+ The number of bytes needed to store this object in memory.
630
+
631
+ Examples
632
+ --------
633
+ >>> pd.array([1, 2, 3]).nbytes
634
+ 27
635
+ """
636
+ # If this is expensive to compute, return an approximate lower bound
637
+ # on the number of bytes needed.
638
+ raise AbstractMethodError(self)
639
+
640
+ # ------------------------------------------------------------------------
641
+ # Additional Methods
642
+ # ------------------------------------------------------------------------
643
+
644
+ @overload
645
+ def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
646
+ ...
647
+
648
+ @overload
649
+ def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
650
+ ...
651
+
652
+ @overload
653
+ def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
654
+ ...
655
+
656
+ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
657
+ """
658
+ Cast to a NumPy array or ExtensionArray with 'dtype'.
659
+
660
+ Parameters
661
+ ----------
662
+ dtype : str or dtype
663
+ Typecode or data-type to which the array is cast.
664
+ copy : bool, default True
665
+ Whether to copy the data, even if not necessary. If False,
666
+ a copy is made only if the old dtype does not match the
667
+ new dtype.
668
+
669
+ Returns
670
+ -------
671
+ np.ndarray or pandas.api.extensions.ExtensionArray
672
+ An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``,
673
+ otherwise a Numpy ndarray with ``dtype`` for its dtype.
674
+
675
+ Examples
676
+ --------
677
+ >>> arr = pd.array([1, 2, 3])
678
+ >>> arr
679
+ <IntegerArray>
680
+ [1, 2, 3]
681
+ Length: 3, dtype: Int64
682
+
683
+ Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:
684
+
685
+ >>> arr1 = arr.astype('Float64')
686
+ >>> arr1
687
+ <FloatingArray>
688
+ [1.0, 2.0, 3.0]
689
+ Length: 3, dtype: Float64
690
+ >>> arr1.dtype
691
+ Float64Dtype()
692
+
693
+ Otherwise, we will get a Numpy ndarray:
694
+
695
+ >>> arr2 = arr.astype('float64')
696
+ >>> arr2
697
+ array([1., 2., 3.])
698
+ >>> arr2.dtype
699
+ dtype('float64')
700
+ """
701
+ dtype = pandas_dtype(dtype)
702
+ if dtype == self.dtype:
703
+ if not copy:
704
+ return self
705
+ else:
706
+ return self.copy()
707
+
708
+ if isinstance(dtype, ExtensionDtype):
709
+ cls = dtype.construct_array_type()
710
+ return cls._from_sequence(self, dtype=dtype, copy=copy)
711
+
712
+ elif lib.is_np_dtype(dtype, "M"):
713
+ from pandas.core.arrays import DatetimeArray
714
+
715
+ return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)
716
+
717
+ elif lib.is_np_dtype(dtype, "m"):
718
+ from pandas.core.arrays import TimedeltaArray
719
+
720
+ return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)
721
+
722
+ if not copy:
723
+ return np.asarray(self, dtype=dtype)
724
+ else:
725
+ return np.array(self, dtype=dtype, copy=copy)
726
+
727
+ def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
728
+ """
729
+ A 1-D array indicating if each value is missing.
730
+
731
+ Returns
732
+ -------
733
+ numpy.ndarray or pandas.api.extensions.ExtensionArray
734
+ In most cases, this should return a NumPy ndarray. For
735
+ exceptional cases like ``SparseArray``, where returning
736
+ an ndarray would be expensive, an ExtensionArray may be
737
+ returned.
738
+
739
+ Notes
740
+ -----
741
+ If returning an ExtensionArray, then
742
+
743
+ * ``na_values._is_boolean`` should be True
744
+ * `na_values` should implement :func:`ExtensionArray._reduce`
745
+ * ``na_values.any`` and ``na_values.all`` should be implemented
746
+
747
+ Examples
748
+ --------
749
+ >>> arr = pd.array([1, 2, np.nan, np.nan])
750
+ >>> arr.isna()
751
+ array([False, False, True, True])
752
+ """
753
+ raise AbstractMethodError(self)
754
+
755
+ @property
756
+ def _hasna(self) -> bool:
757
+ # GH#22680
758
+ """
759
+ Equivalent to `self.isna().any()`.
760
+
761
+ Some ExtensionArray subclasses may be able to optimize this check.
762
+ """
763
+ return bool(self.isna().any())
764
+
765
+ def _values_for_argsort(self) -> np.ndarray:
766
+ """
767
+ Return values for sorting.
768
+
769
+ Returns
770
+ -------
771
+ ndarray
772
+ The transformed values should maintain the ordering between values
773
+ within the array.
774
+
775
+ See Also
776
+ --------
777
+ ExtensionArray.argsort : Return the indices that would sort this array.
778
+
779
+ Notes
780
+ -----
781
+ The caller is responsible for *not* modifying these values in-place, so
782
+ it is safe for implementers to give views on ``self``.
783
+
784
+ Functions that use this (e.g. ``ExtensionArray.argsort``) should ignore
785
+ entries with missing values in the original array (according to
786
+ ``self.isna()``). This means that the corresponding entries in the returned
787
+ array don't need to be modified to sort correctly.
788
+
789
+ Examples
790
+ --------
791
+ In most cases, this is the underlying Numpy array of the ``ExtensionArray``:
792
+
793
+ >>> arr = pd.array([1, 2, 3])
794
+ >>> arr._values_for_argsort()
795
+ array([1, 2, 3])
796
+ """
797
+ # Note: this is used in `ExtensionArray.argsort/argmin/argmax`.
798
+ return np.array(self)
799
+
800
+ def argsort(
801
+ self,
802
+ *,
803
+ ascending: bool = True,
804
+ kind: SortKind = "quicksort",
805
+ na_position: str = "last",
806
+ **kwargs,
807
+ ) -> np.ndarray:
808
+ """
809
+ Return the indices that would sort this array.
810
+
811
+ Parameters
812
+ ----------
813
+ ascending : bool, default True
814
+ Whether the indices should result in an ascending
815
+ or descending sort.
816
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
817
+ Sorting algorithm.
818
+ na_position : {'first', 'last'}, default 'last'
819
+ If ``'first'``, put ``NaN`` values at the beginning.
820
+ If ``'last'``, put ``NaN`` values at the end.
821
+ *args, **kwargs:
822
+ Passed through to :func:`numpy.argsort`.
823
+
824
+ Returns
825
+ -------
826
+ np.ndarray[np.intp]
827
+ Array of indices that sort ``self``. If NaN values are contained,
828
+ NaN values are placed at the end.
829
+
830
+ See Also
831
+ --------
832
+ numpy.argsort : Sorting implementation used internally.
833
+
834
+ Examples
835
+ --------
836
+ >>> arr = pd.array([3, 1, 2, 5, 4])
837
+ >>> arr.argsort()
838
+ array([1, 2, 0, 4, 3])
839
+ """
840
+ # Implementer note: You have two places to override the behavior of
841
+ # argsort.
842
+ # 1. _values_for_argsort : construct the values passed to np.argsort
843
+ # 2. argsort : total control over sorting. In case of overriding this,
844
+ # it is recommended to also override argmax/argmin
845
+ ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)
846
+
847
+ values = self._values_for_argsort()
848
+ return nargsort(
849
+ values,
850
+ kind=kind,
851
+ ascending=ascending,
852
+ na_position=na_position,
853
+ mask=np.asarray(self.isna()),
854
+ )
855
+
856
+ def argmin(self, skipna: bool = True) -> int:
857
+ """
858
+ Return the index of minimum value.
859
+
860
+ In case of multiple occurrences of the minimum value, the index
861
+ corresponding to the first occurrence is returned.
862
+
863
+ Parameters
864
+ ----------
865
+ skipna : bool, default True
866
+
867
+ Returns
868
+ -------
869
+ int
870
+
871
+ See Also
872
+ --------
873
+ ExtensionArray.argmax : Return the index of the maximum value.
874
+
875
+ Examples
876
+ --------
877
+ >>> arr = pd.array([3, 1, 2, 5, 4])
878
+ >>> arr.argmin()
879
+ 1
880
+ """
881
+ # Implementer note: You have two places to override the behavior of
882
+ # argmin.
883
+ # 1. _values_for_argsort : construct the values used in nargminmax
884
+ # 2. argmin itself : total control over sorting.
885
+ validate_bool_kwarg(skipna, "skipna")
886
+ if not skipna and self._hasna:
887
+ raise NotImplementedError
888
+ return nargminmax(self, "argmin")
889
+
890
+ def argmax(self, skipna: bool = True) -> int:
891
+ """
892
+ Return the index of maximum value.
893
+
894
+ In case of multiple occurrences of the maximum value, the index
895
+ corresponding to the first occurrence is returned.
896
+
897
+ Parameters
898
+ ----------
899
+ skipna : bool, default True
900
+
901
+ Returns
902
+ -------
903
+ int
904
+
905
+ See Also
906
+ --------
907
+ ExtensionArray.argmin : Return the index of the minimum value.
908
+
909
+ Examples
910
+ --------
911
+ >>> arr = pd.array([3, 1, 2, 5, 4])
912
+ >>> arr.argmax()
913
+ 3
914
+ """
915
+ # Implementer note: You have two places to override the behavior of
916
+ # argmax.
917
+ # 1. _values_for_argsort : construct the values used in nargminmax
918
+ # 2. argmax itself : total control over sorting.
919
+ validate_bool_kwarg(skipna, "skipna")
920
+ if not skipna and self._hasna:
921
+ raise NotImplementedError
922
+ return nargminmax(self, "argmax")
923
+
924
+ def interpolate(
925
+ self,
926
+ *,
927
+ method: InterpolateOptions,
928
+ axis: int,
929
+ index: Index,
930
+ limit,
931
+ limit_direction,
932
+ limit_area,
933
+ copy: bool,
934
+ **kwargs,
935
+ ) -> Self:
936
+ """
937
+ See DataFrame.interpolate.__doc__.
938
+
939
+ Examples
940
+ --------
941
+ >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3]))
942
+ >>> arr.interpolate(method="linear",
943
+ ... limit=3,
944
+ ... limit_direction="forward",
945
+ ... index=pd.Index([1, 2, 3, 4]),
946
+ ... fill_value=1,
947
+ ... copy=False,
948
+ ... axis=0,
949
+ ... limit_area="inside"
950
+ ... )
951
+ <NumpyExtensionArray>
952
+ [0.0, 1.0, 2.0, 3.0]
953
+ Length: 4, dtype: float64
954
+ """
955
+ # NB: we return type(self) even if copy=False
956
+ raise NotImplementedError(
957
+ f"{type(self).__name__} does not implement interpolate"
958
+ )
959
+
960
+ def _pad_or_backfill(
961
+ self,
962
+ *,
963
+ method: FillnaOptions,
964
+ limit: int | None = None,
965
+ limit_area: Literal["inside", "outside"] | None = None,
966
+ copy: bool = True,
967
+ ) -> Self:
968
+ """
969
+ Pad or backfill values, used by Series/DataFrame ffill and bfill.
970
+
971
+ Parameters
972
+ ----------
973
+ method : {'backfill', 'bfill', 'pad', 'ffill'}
974
+ Method to use for filling holes in reindexed Series:
975
+
976
+ * pad / ffill: propagate last valid observation forward to next valid.
977
+ * backfill / bfill: use NEXT valid observation to fill gap.
978
+
979
+ limit : int, default None
980
+ This is the maximum number of consecutive
981
+ NaN values to forward/backward fill. In other words, if there is
982
+ a gap with more than this number of consecutive NaNs, it will only
983
+ be partially filled. If method is not specified, this is the
984
+ maximum number of entries along the entire axis where NaNs will be
985
+ filled.
986
+
987
+ copy : bool, default True
988
+ Whether to make a copy of the data before filling. If False, then
989
+ the original should be modified and no new memory should be allocated.
990
+ For ExtensionArray subclasses that cannot do this, it is at the
991
+ author's discretion whether to ignore "copy=False" or to raise.
992
+ The base class implementation ignores the keyword if any NAs are
993
+ present.
994
+
995
+ Returns
996
+ -------
997
+ Same type as self
998
+
999
+ Examples
1000
+ --------
1001
+ >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
1002
+ >>> arr._pad_or_backfill(method="backfill", limit=1)
1003
+ <IntegerArray>
1004
+ [<NA>, 2, 2, 3, <NA>, <NA>]
1005
+ Length: 6, dtype: Int64
1006
+ """
1007
+
1008
+ # If a 3rd-party EA has implemented this functionality in fillna,
1009
+ # we warn that they need to implement _pad_or_backfill instead.
1010
+ if (
1011
+ type(self).fillna is not ExtensionArray.fillna
1012
+ and type(self)._pad_or_backfill is ExtensionArray._pad_or_backfill
1013
+ ):
1014
+ # Check for _pad_or_backfill here allows us to call
1015
+ # super()._pad_or_backfill without getting this warning
1016
+ warnings.warn(
1017
+ "ExtensionArray.fillna 'method' keyword is deprecated. "
1018
+ "In a future version. arr._pad_or_backfill will be called "
1019
+ "instead. 3rd-party ExtensionArray authors need to implement "
1020
+ "_pad_or_backfill.",
1021
+ DeprecationWarning,
1022
+ stacklevel=find_stack_level(),
1023
+ )
1024
+ if limit_area is not None:
1025
+ raise NotImplementedError(
1026
+ f"{type(self).__name__} does not implement limit_area "
1027
+ "(added in pandas 2.2). 3rd-party ExtnsionArray authors "
1028
+ "need to add this argument to _pad_or_backfill."
1029
+ )
1030
+ return self.fillna(method=method, limit=limit)
1031
+
1032
+ mask = self.isna()
1033
+
1034
+ if mask.any():
1035
+ # NB: the base class does not respect the "copy" keyword
1036
+ meth = missing.clean_fill_method(method)
1037
+
1038
+ npmask = np.asarray(mask)
1039
+ if limit_area is not None and not npmask.all():
1040
+ _fill_limit_area_1d(npmask, limit_area)
1041
+ if meth == "pad":
1042
+ indexer = libalgos.get_fill_indexer(npmask, limit=limit)
1043
+ return self.take(indexer, allow_fill=True)
1044
+ else:
1045
+ # i.e. meth == "backfill"
1046
+ indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
1047
+ return self[::-1].take(indexer, allow_fill=True)
1048
+
1049
+ else:
1050
+ if not copy:
1051
+ return self
1052
+ new_values = self.copy()
1053
+ return new_values
1054
+
1055
+ def fillna(
1056
+ self,
1057
+ value: object | ArrayLike | None = None,
1058
+ method: FillnaOptions | None = None,
1059
+ limit: int | None = None,
1060
+ copy: bool = True,
1061
+ ) -> Self:
1062
+ """
1063
+ Fill NA/NaN values using the specified method.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ value : scalar, array-like
1068
+ If a scalar value is passed it is used to fill all missing values.
1069
+ Alternatively, an array-like "value" can be given. It's expected
1070
+ that the array-like have the same length as 'self'.
1071
+ method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
1072
+ Method to use for filling holes in reindexed Series:
1073
+
1074
+ * pad / ffill: propagate last valid observation forward to next valid.
1075
+ * backfill / bfill: use NEXT valid observation to fill gap.
1076
+
1077
+ .. deprecated:: 2.1.0
1078
+
1079
+ limit : int, default None
1080
+ If method is specified, this is the maximum number of consecutive
1081
+ NaN values to forward/backward fill. In other words, if there is
1082
+ a gap with more than this number of consecutive NaNs, it will only
1083
+ be partially filled. If method is not specified, this is the
1084
+ maximum number of entries along the entire axis where NaNs will be
1085
+ filled.
1086
+
1087
+ .. deprecated:: 2.1.0
1088
+
1089
+ copy : bool, default True
1090
+ Whether to make a copy of the data before filling. If False, then
1091
+ the original should be modified and no new memory should be allocated.
1092
+ For ExtensionArray subclasses that cannot do this, it is at the
1093
+ author's discretion whether to ignore "copy=False" or to raise.
1094
+ The base class implementation ignores the keyword in pad/backfill
1095
+ cases.
1096
+
1097
+ Returns
1098
+ -------
1099
+ ExtensionArray
1100
+ With NA/NaN filled.
1101
+
1102
+ Examples
1103
+ --------
1104
+ >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
1105
+ >>> arr.fillna(0)
1106
+ <IntegerArray>
1107
+ [0, 0, 2, 3, 0, 0]
1108
+ Length: 6, dtype: Int64
1109
+ """
1110
+ if method is not None:
1111
+ warnings.warn(
1112
+ f"The 'method' keyword in {type(self).__name__}.fillna is "
1113
+ "deprecated and will be removed in a future version.",
1114
+ FutureWarning,
1115
+ stacklevel=find_stack_level(),
1116
+ )
1117
+
1118
+ value, method = validate_fillna_kwargs(value, method)
1119
+
1120
+ mask = self.isna()
1121
+ # error: Argument 2 to "check_value_size" has incompatible type
1122
+ # "ExtensionArray"; expected "ndarray"
1123
+ value = missing.check_value_size(
1124
+ value, mask, len(self) # type: ignore[arg-type]
1125
+ )
1126
+
1127
+ if mask.any():
1128
+ if method is not None:
1129
+ meth = missing.clean_fill_method(method)
1130
+
1131
+ npmask = np.asarray(mask)
1132
+ if meth == "pad":
1133
+ indexer = libalgos.get_fill_indexer(npmask, limit=limit)
1134
+ return self.take(indexer, allow_fill=True)
1135
+ else:
1136
+ # i.e. meth == "backfill"
1137
+ indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
1138
+ return self[::-1].take(indexer, allow_fill=True)
1139
+ else:
1140
+ # fill with value
1141
+ if not copy:
1142
+ new_values = self[:]
1143
+ else:
1144
+ new_values = self.copy()
1145
+ new_values[mask] = value
1146
+ else:
1147
+ if not copy:
1148
+ new_values = self[:]
1149
+ else:
1150
+ new_values = self.copy()
1151
+ return new_values
1152
+
1153
+ def dropna(self) -> Self:
1154
+ """
1155
+ Return ExtensionArray without NA values.
1156
+
1157
+ Returns
1158
+ -------
1159
+
1160
+ Examples
1161
+ --------
1162
+ >>> pd.array([1, 2, np.nan]).dropna()
1163
+ <IntegerArray>
1164
+ [1, 2]
1165
+ Length: 2, dtype: Int64
1166
+ """
1167
+ # error: Unsupported operand type for ~ ("ExtensionArray")
1168
+ return self[~self.isna()] # type: ignore[operator]
1169
+
1170
+ def duplicated(
1171
+ self, keep: Literal["first", "last", False] = "first"
1172
+ ) -> npt.NDArray[np.bool_]:
1173
+ """
1174
+ Return boolean ndarray denoting duplicate values.
1175
+
1176
+ Parameters
1177
+ ----------
1178
+ keep : {'first', 'last', False}, default 'first'
1179
+ - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
1180
+ - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
1181
+ - False : Mark all duplicates as ``True``.
1182
+
1183
+ Returns
1184
+ -------
1185
+ ndarray[bool]
1186
+
1187
+ Examples
1188
+ --------
1189
+ >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated()
1190
+ array([False, True, False, False, True])
1191
+ """
1192
+ mask = self.isna().astype(np.bool_, copy=False)
1193
+ return duplicated(values=self, keep=keep, mask=mask)
1194
+
1195
+ def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray:
1196
+ """
1197
+ Shift values by desired number.
1198
+
1199
+ Newly introduced missing values are filled with
1200
+ ``self.dtype.na_value``.
1201
+
1202
+ Parameters
1203
+ ----------
1204
+ periods : int, default 1
1205
+ The number of periods to shift. Negative values are allowed
1206
+ for shifting backwards.
1207
+
1208
+ fill_value : object, optional
1209
+ The scalar value to use for newly introduced missing values.
1210
+ The default is ``self.dtype.na_value``.
1211
+
1212
+ Returns
1213
+ -------
1214
+ ExtensionArray
1215
+ Shifted.
1216
+
1217
+ Notes
1218
+ -----
1219
+ If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
1220
+ returned.
1221
+
1222
+ If ``periods > len(self)``, then an array of size
1223
+ len(self) is returned, with all values filled with
1224
+ ``self.dtype.na_value``.
1225
+
1226
+ For 2-dimensional ExtensionArrays, we are always shifting along axis=0.
1227
+
1228
+ Examples
1229
+ --------
1230
+ >>> arr = pd.array([1, 2, 3])
1231
+ >>> arr.shift(2)
1232
+ <IntegerArray>
1233
+ [<NA>, <NA>, 1]
1234
+ Length: 3, dtype: Int64
1235
+ """
1236
+ # Note: this implementation assumes that `self.dtype.na_value` can be
1237
+ # stored in an instance of your ExtensionArray with `self.dtype`.
1238
+ if not len(self) or periods == 0:
1239
+ return self.copy()
1240
+
1241
+ if isna(fill_value):
1242
+ fill_value = self.dtype.na_value
1243
+
1244
+ empty = self._from_sequence(
1245
+ [fill_value] * min(abs(periods), len(self)), dtype=self.dtype
1246
+ )
1247
+ if periods > 0:
1248
+ a = empty
1249
+ b = self[:-periods]
1250
+ else:
1251
+ a = self[abs(periods) :]
1252
+ b = empty
1253
+ return self._concat_same_type([a, b])
1254
+
1255
+ def unique(self) -> Self:
1256
+ """
1257
+ Compute the ExtensionArray of unique values.
1258
+
1259
+ Returns
1260
+ -------
1261
+ pandas.api.extensions.ExtensionArray
1262
+
1263
+ Examples
1264
+ --------
1265
+ >>> arr = pd.array([1, 2, 3, 1, 2, 3])
1266
+ >>> arr.unique()
1267
+ <IntegerArray>
1268
+ [1, 2, 3]
1269
+ Length: 3, dtype: Int64
1270
+ """
1271
+ uniques = unique(self.astype(object))
1272
+ return self._from_sequence(uniques, dtype=self.dtype)
1273
+
1274
+ def searchsorted(
1275
+ self,
1276
+ value: NumpyValueArrayLike | ExtensionArray,
1277
+ side: Literal["left", "right"] = "left",
1278
+ sorter: NumpySorter | None = None,
1279
+ ) -> npt.NDArray[np.intp] | np.intp:
1280
+ """
1281
+ Find indices where elements should be inserted to maintain order.
1282
+
1283
+ Find the indices into a sorted array `self` (a) such that, if the
1284
+ corresponding elements in `value` were inserted before the indices,
1285
+ the order of `self` would be preserved.
1286
+
1287
+ Assuming that `self` is sorted:
1288
+
1289
+ ====== ================================
1290
+ `side` returned index `i` satisfies
1291
+ ====== ================================
1292
+ left ``self[i-1] < value <= self[i]``
1293
+ right ``self[i-1] <= value < self[i]``
1294
+ ====== ================================
1295
+
1296
+ Parameters
1297
+ ----------
1298
+ value : array-like, list or scalar
1299
+ Value(s) to insert into `self`.
1300
+ side : {'left', 'right'}, optional
1301
+ If 'left', the index of the first suitable location found is given.
1302
+ If 'right', return the last such index. If there is no suitable
1303
+ index, return either 0 or N (where N is the length of `self`).
1304
+ sorter : 1-D array-like, optional
1305
+ Optional array of integer indices that sort array a into ascending
1306
+ order. They are typically the result of argsort.
1307
+
1308
+ Returns
1309
+ -------
1310
+ array of ints or int
1311
+ If value is array-like, array of insertion points.
1312
+ If value is scalar, a single integer.
1313
+
1314
+ See Also
1315
+ --------
1316
+ numpy.searchsorted : Similar method from NumPy.
1317
+
1318
+ Examples
1319
+ --------
1320
+ >>> arr = pd.array([1, 2, 3, 5])
1321
+ >>> arr.searchsorted([4])
1322
+ array([3])
1323
+ """
1324
+ # Note: the base tests provided by pandas only test the basics.
1325
+ # We do not test
1326
+ # 1. Values outside the range of the `data_for_sorting` fixture
1327
+ # 2. Values between the values in the `data_for_sorting` fixture
1328
+ # 3. Missing values.
1329
+ arr = self.astype(object)
1330
+ if isinstance(value, ExtensionArray):
1331
+ value = value.astype(object)
1332
+ return arr.searchsorted(value, side=side, sorter=sorter)
1333
+
1334
+ def equals(self, other: object) -> bool:
1335
+ """
1336
+ Return if another array is equivalent to this array.
1337
+
1338
+ Equivalent means that both arrays have the same shape and dtype, and
1339
+ all values compare equal. Missing values in the same location are
1340
+ considered equal (in contrast with normal equality).
1341
+
1342
+ Parameters
1343
+ ----------
1344
+ other : ExtensionArray
1345
+ Array to compare to this Array.
1346
+
1347
+ Returns
1348
+ -------
1349
+ boolean
1350
+ Whether the arrays are equivalent.
1351
+
1352
+ Examples
1353
+ --------
1354
+ >>> arr1 = pd.array([1, 2, np.nan])
1355
+ >>> arr2 = pd.array([1, 2, np.nan])
1356
+ >>> arr1.equals(arr2)
1357
+ True
1358
+ """
1359
+ if type(self) != type(other):
1360
+ return False
1361
+ other = cast(ExtensionArray, other)
1362
+ if self.dtype != other.dtype:
1363
+ return False
1364
+ elif len(self) != len(other):
1365
+ return False
1366
+ else:
1367
+ equal_values = self == other
1368
+ if isinstance(equal_values, ExtensionArray):
1369
+ # boolean array with NA -> fill with False
1370
+ equal_values = equal_values.fillna(False)
1371
+ # error: Unsupported left operand type for & ("ExtensionArray")
1372
+ equal_na = self.isna() & other.isna() # type: ignore[operator]
1373
+ return bool((equal_values | equal_na).all())
1374
+
1375
+ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
1376
+ """
1377
+ Pointwise comparison for set containment in the given values.
1378
+
1379
+ Roughly equivalent to `np.array([x in values for x in self])`
1380
+
1381
+ Parameters
1382
+ ----------
1383
+ values : np.ndarray or ExtensionArray
1384
+
1385
+ Returns
1386
+ -------
1387
+ np.ndarray[bool]
1388
+
1389
+ Examples
1390
+ --------
1391
+ >>> arr = pd.array([1, 2, 3])
1392
+ >>> arr.isin([1])
1393
+ <BooleanArray>
1394
+ [True, False, False]
1395
+ Length: 3, dtype: boolean
1396
+ """
1397
+ return isin(np.asarray(self), values)
1398
+
1399
+ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
1400
+ """
1401
+ Return an array and missing value suitable for factorization.
1402
+
1403
+ Returns
1404
+ -------
1405
+ values : ndarray
1406
+ An array suitable for factorization. This should maintain order
1407
+ and be a supported dtype (Float64, Int64, UInt64, String, Object).
1408
+ By default, the extension array is cast to object dtype.
1409
+ na_value : object
1410
+ The value in `values` to consider missing. This will be treated
1411
+ as NA in the factorization routines, so it will be coded as
1412
+ `-1` and not included in `uniques`. By default,
1413
+ ``np.nan`` is used.
1414
+
1415
+ Notes
1416
+ -----
1417
+ The values returned by this method are also used in
1418
+ :func:`pandas.util.hash_pandas_object`. If needed, this can be
1419
+ overridden in the ``self._hash_pandas_object()`` method.
1420
+
1421
+ Examples
1422
+ --------
1423
+ >>> pd.array([1, 2, 3])._values_for_factorize()
1424
+ (array([1, 2, 3], dtype=object), nan)
1425
+ """
1426
+ return self.astype(object), np.nan
1427
+
1428
+ def factorize(
1429
+ self,
1430
+ use_na_sentinel: bool = True,
1431
+ ) -> tuple[np.ndarray, ExtensionArray]:
1432
+ """
1433
+ Encode the extension array as an enumerated type.
1434
+
1435
+ Parameters
1436
+ ----------
1437
+ use_na_sentinel : bool, default True
1438
+ If True, the sentinel -1 will be used for NaN values. If False,
1439
+ NaN values will be encoded as non-negative integers and will not drop the
1440
+ NaN from the uniques of the values.
1441
+
1442
+ .. versionadded:: 1.5.0
1443
+
1444
+ Returns
1445
+ -------
1446
+ codes : ndarray
1447
+ An integer NumPy array that's an indexer into the original
1448
+ ExtensionArray.
1449
+ uniques : ExtensionArray
1450
+ An ExtensionArray containing the unique values of `self`.
1451
+
1452
+ .. note::
1453
+
1454
+ uniques will *not* contain an entry for the NA value of
1455
+ the ExtensionArray if there are any missing values present
1456
+ in `self`.
1457
+
1458
+ See Also
1459
+ --------
1460
+ factorize : Top-level factorize method that dispatches here.
1461
+
1462
+ Notes
1463
+ -----
1464
+ :meth:`pandas.factorize` offers a `sort` keyword as well.
1465
+
1466
+ Examples
1467
+ --------
1468
+ >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02",
1469
+ ... "2014-03", "2014-03"], freq="M")
1470
+ >>> arr, idx = idx1.factorize()
1471
+ >>> arr
1472
+ array([0, 0, 1, 1, 2, 2])
1473
+ >>> idx
1474
+ PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')
1475
+ """
1476
+ # Implementer note: There are two ways to override the behavior of
1477
+ # pandas.factorize
1478
+ # 1. _values_for_factorize and _from_factorize.
1479
+ # Specify the values passed to pandas' internal factorization
1480
+ # routines, and how to convert from those values back to the
1481
+ # original ExtensionArray.
1482
+ # 2. ExtensionArray.factorize.
1483
+ # Complete control over factorization.
1484
+ arr, na_value = self._values_for_factorize()
1485
+
1486
+ codes, uniques = factorize_array(
1487
+ arr, use_na_sentinel=use_na_sentinel, na_value=na_value
1488
+ )
1489
+
1490
+ uniques_ea = self._from_factorized(uniques, self)
1491
+ return codes, uniques_ea
1492
+
1493
+ _extension_array_shared_docs[
1494
+ "repeat"
1495
+ ] = """
1496
+ Repeat elements of a %(klass)s.
1497
+
1498
+ Returns a new %(klass)s where each element of the current %(klass)s
1499
+ is repeated consecutively a given number of times.
1500
+
1501
+ Parameters
1502
+ ----------
1503
+ repeats : int or array of ints
1504
+ The number of repetitions for each element. This should be a
1505
+ non-negative integer. Repeating 0 times will return an empty
1506
+ %(klass)s.
1507
+ axis : None
1508
+ Must be ``None``. Has no effect but is accepted for compatibility
1509
+ with numpy.
1510
+
1511
+ Returns
1512
+ -------
1513
+ %(klass)s
1514
+ Newly created %(klass)s with repeated elements.
1515
+
1516
+ See Also
1517
+ --------
1518
+ Series.repeat : Equivalent function for Series.
1519
+ Index.repeat : Equivalent function for Index.
1520
+ numpy.repeat : Similar method for :class:`numpy.ndarray`.
1521
+ ExtensionArray.take : Take arbitrary positions.
1522
+
1523
+ Examples
1524
+ --------
1525
+ >>> cat = pd.Categorical(['a', 'b', 'c'])
1526
+ >>> cat
1527
+ ['a', 'b', 'c']
1528
+ Categories (3, object): ['a', 'b', 'c']
1529
+ >>> cat.repeat(2)
1530
+ ['a', 'a', 'b', 'b', 'c', 'c']
1531
+ Categories (3, object): ['a', 'b', 'c']
1532
+ >>> cat.repeat([1, 2, 3])
1533
+ ['a', 'b', 'b', 'c', 'c', 'c']
1534
+ Categories (3, object): ['a', 'b', 'c']
1535
+ """
1536
+
1537
+ @Substitution(klass="ExtensionArray")
1538
+ @Appender(_extension_array_shared_docs["repeat"])
1539
+ def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None = None) -> Self:
1540
+ nv.validate_repeat((), {"axis": axis})
1541
+ ind = np.arange(len(self)).repeat(repeats)
1542
+ return self.take(ind)
1543
+
1544
+ # ------------------------------------------------------------------------
1545
+ # Indexing methods
1546
+ # ------------------------------------------------------------------------
1547
+
1548
+ def take(
1549
+ self,
1550
+ indices: TakeIndexer,
1551
+ *,
1552
+ allow_fill: bool = False,
1553
+ fill_value: Any = None,
1554
+ ) -> Self:
1555
+ """
1556
+ Take elements from an array.
1557
+
1558
+ Parameters
1559
+ ----------
1560
+ indices : sequence of int or one-dimensional np.ndarray of int
1561
+ Indices to be taken.
1562
+ allow_fill : bool, default False
1563
+ How to handle negative values in `indices`.
1564
+
1565
+ * False: negative values in `indices` indicate positional indices
1566
+ from the right (the default). This is similar to
1567
+ :func:`numpy.take`.
1568
+
1569
+ * True: negative values in `indices` indicate
1570
+ missing values. These values are set to `fill_value`. Any other
1571
+ other negative values raise a ``ValueError``.
1572
+
1573
+ fill_value : any, optional
1574
+ Fill value to use for NA-indices when `allow_fill` is True.
1575
+ This may be ``None``, in which case the default NA value for
1576
+ the type, ``self.dtype.na_value``, is used.
1577
+
1578
+ For many ExtensionArrays, there will be two representations of
1579
+ `fill_value`: a user-facing "boxed" scalar, and a low-level
1580
+ physical NA value. `fill_value` should be the user-facing version,
1581
+ and the implementation should handle translating that to the
1582
+ physical version for processing the take if necessary.
1583
+
1584
+ Returns
1585
+ -------
1586
+ ExtensionArray
1587
+
1588
+ Raises
1589
+ ------
1590
+ IndexError
1591
+ When the indices are out of bounds for the array.
1592
+ ValueError
1593
+ When `indices` contains negative values other than ``-1``
1594
+ and `allow_fill` is True.
1595
+
1596
+ See Also
1597
+ --------
1598
+ numpy.take : Take elements from an array along an axis.
1599
+ api.extensions.take : Take elements from an array.
1600
+
1601
+ Notes
1602
+ -----
1603
+ ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
1604
+ ``iloc``, when `indices` is a sequence of values. Additionally,
1605
+ it's called by :meth:`Series.reindex`, or any other method
1606
+ that causes realignment, with a `fill_value`.
1607
+
1608
+ Examples
1609
+ --------
1610
+ Here's an example implementation, which relies on casting the
1611
+ extension array to object dtype. This uses the helper method
1612
+ :func:`pandas.api.extensions.take`.
1613
+
1614
+ .. code-block:: python
1615
+
1616
+ def take(self, indices, allow_fill=False, fill_value=None):
1617
+ from pandas.core.algorithms import take
1618
+
1619
+ # If the ExtensionArray is backed by an ndarray, then
1620
+ # just pass that here instead of coercing to object.
1621
+ data = self.astype(object)
1622
+
1623
+ if allow_fill and fill_value is None:
1624
+ fill_value = self.dtype.na_value
1625
+
1626
+ # fill value should always be translated from the scalar
1627
+ # type for the array, to the physical storage type for
1628
+ # the data, before passing to take.
1629
+
1630
+ result = take(data, indices, fill_value=fill_value,
1631
+ allow_fill=allow_fill)
1632
+ return self._from_sequence(result, dtype=self.dtype)
1633
+ """
1634
+ # Implementer note: The `fill_value` parameter should be a user-facing
1635
+ # value, an instance of self.dtype.type. When passed `fill_value=None`,
1636
+ # the default of `self.dtype.na_value` should be used.
1637
+ # This may differ from the physical storage type your ExtensionArray
1638
+ # uses. In this case, your implementation is responsible for casting
1639
+ # the user-facing type to the storage type, before using
1640
+ # pandas.api.extensions.take
1641
+ raise AbstractMethodError(self)
1642
+
1643
+ def copy(self) -> Self:
1644
+ """
1645
+ Return a copy of the array.
1646
+
1647
+ Returns
1648
+ -------
1649
+ ExtensionArray
1650
+
1651
+ Examples
1652
+ --------
1653
+ >>> arr = pd.array([1, 2, 3])
1654
+ >>> arr2 = arr.copy()
1655
+ >>> arr[0] = 2
1656
+ >>> arr2
1657
+ <IntegerArray>
1658
+ [1, 2, 3]
1659
+ Length: 3, dtype: Int64
1660
+ """
1661
+ raise AbstractMethodError(self)
1662
+
1663
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
1664
+ """
1665
+ Return a view on the array.
1666
+
1667
+ Parameters
1668
+ ----------
1669
+ dtype : str, np.dtype, or ExtensionDtype, optional
1670
+ Default None.
1671
+
1672
+ Returns
1673
+ -------
1674
+ ExtensionArray or np.ndarray
1675
+ A view on the :class:`ExtensionArray`'s data.
1676
+
1677
+ Examples
1678
+ --------
1679
+ This gives view on the underlying data of an ``ExtensionArray`` and is not a
1680
+ copy. Modifications on either the view or the original ``ExtensionArray``
1681
+ will be reflectd on the underlying data:
1682
+
1683
+ >>> arr = pd.array([1, 2, 3])
1684
+ >>> arr2 = arr.view()
1685
+ >>> arr[0] = 2
1686
+ >>> arr2
1687
+ <IntegerArray>
1688
+ [2, 2, 3]
1689
+ Length: 3, dtype: Int64
1690
+ """
1691
+ # NB:
1692
+ # - This must return a *new* object referencing the same data, not self.
1693
+ # - The only case that *must* be implemented is with dtype=None,
1694
+ # giving a view with the same dtype as self.
1695
+ if dtype is not None:
1696
+ raise NotImplementedError(dtype)
1697
+ return self[:]
1698
+
1699
+ # ------------------------------------------------------------------------
1700
+ # Printing
1701
+ # ------------------------------------------------------------------------
1702
+
1703
+ def __repr__(self) -> str:
1704
+ if self.ndim > 1:
1705
+ return self._repr_2d()
1706
+
1707
+ from pandas.io.formats.printing import format_object_summary
1708
+
1709
+ # the short repr has no trailing newline, while the truncated
1710
+ # repr does. So we include a newline in our template, and strip
1711
+ # any trailing newlines from format_object_summary
1712
+ data = format_object_summary(
1713
+ self, self._formatter(), indent_for_name=False
1714
+ ).rstrip(", \n")
1715
+ class_name = f"<{type(self).__name__}>\n"
1716
+ footer = self._get_repr_footer()
1717
+ return f"{class_name}{data}\n{footer}"
1718
+
1719
+ def _get_repr_footer(self) -> str:
1720
+ # GH#24278
1721
+ if self.ndim > 1:
1722
+ return f"Shape: {self.shape}, dtype: {self.dtype}"
1723
+ return f"Length: {len(self)}, dtype: {self.dtype}"
1724
+
1725
+ def _repr_2d(self) -> str:
1726
+ from pandas.io.formats.printing import format_object_summary
1727
+
1728
+ # the short repr has no trailing newline, while the truncated
1729
+ # repr does. So we include a newline in our template, and strip
1730
+ # any trailing newlines from format_object_summary
1731
+ lines = [
1732
+ format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
1733
+ ", \n"
1734
+ )
1735
+ for x in self
1736
+ ]
1737
+ data = ",\n".join(lines)
1738
+ class_name = f"<{type(self).__name__}>"
1739
+ footer = self._get_repr_footer()
1740
+ return f"{class_name}\n[\n{data}\n]\n{footer}"
1741
+
1742
+ def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
1743
+ """
1744
+ Formatting function for scalar values.
1745
+
1746
+ This is used in the default '__repr__'. The returned formatting
1747
+ function receives instances of your scalar type.
1748
+
1749
+ Parameters
1750
+ ----------
1751
+ boxed : bool, default False
1752
+ An indicated for whether or not your array is being printed
1753
+ within a Series, DataFrame, or Index (True), or just by
1754
+ itself (False). This may be useful if you want scalar values
1755
+ to appear differently within a Series versus on its own (e.g.
1756
+ quoted or not).
1757
+
1758
+ Returns
1759
+ -------
1760
+ Callable[[Any], str]
1761
+ A callable that gets instances of the scalar type and
1762
+ returns a string. By default, :func:`repr` is used
1763
+ when ``boxed=False`` and :func:`str` is used when
1764
+ ``boxed=True``.
1765
+
1766
+ Examples
1767
+ --------
1768
+ >>> class MyExtensionArray(pd.arrays.NumpyExtensionArray):
1769
+ ... def _formatter(self, boxed=False):
1770
+ ... return lambda x: '*' + str(x) + '*' if boxed else repr(x) + '*'
1771
+ >>> MyExtensionArray(np.array([1, 2, 3, 4]))
1772
+ <MyExtensionArray>
1773
+ [1*, 2*, 3*, 4*]
1774
+ Length: 4, dtype: int64
1775
+ """
1776
+ if boxed:
1777
+ return str
1778
+ return repr
1779
+
1780
+ # ------------------------------------------------------------------------
1781
+ # Reshaping
1782
+ # ------------------------------------------------------------------------
1783
+
1784
+ def transpose(self, *axes: int) -> ExtensionArray:
1785
+ """
1786
+ Return a transposed view on this array.
1787
+
1788
+ Because ExtensionArrays are always 1D, this is a no-op. It is included
1789
+ for compatibility with np.ndarray.
1790
+
1791
+ Returns
1792
+ -------
1793
+ ExtensionArray
1794
+
1795
+ Examples
1796
+ --------
1797
+ >>> pd.array([1, 2, 3]).transpose()
1798
+ <IntegerArray>
1799
+ [1, 2, 3]
1800
+ Length: 3, dtype: Int64
1801
+ """
1802
+ return self[:]
1803
+
1804
+ @property
1805
+ def T(self) -> ExtensionArray:
1806
+ return self.transpose()
1807
+
1808
+ def ravel(self, order: Literal["C", "F", "A", "K"] | None = "C") -> ExtensionArray:
1809
+ """
1810
+ Return a flattened view on this array.
1811
+
1812
+ Parameters
1813
+ ----------
1814
+ order : {None, 'C', 'F', 'A', 'K'}, default 'C'
1815
+
1816
+ Returns
1817
+ -------
1818
+ ExtensionArray
1819
+
1820
+ Notes
1821
+ -----
1822
+ - Because ExtensionArrays are 1D-only, this is a no-op.
1823
+ - The "order" argument is ignored, is for compatibility with NumPy.
1824
+
1825
+ Examples
1826
+ --------
1827
+ >>> pd.array([1, 2, 3]).ravel()
1828
+ <IntegerArray>
1829
+ [1, 2, 3]
1830
+ Length: 3, dtype: Int64
1831
+ """
1832
+ return self
1833
+
1834
+ @classmethod
1835
+ def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
1836
+ """
1837
+ Concatenate multiple array of this dtype.
1838
+
1839
+ Parameters
1840
+ ----------
1841
+ to_concat : sequence of this type
1842
+
1843
+ Returns
1844
+ -------
1845
+ ExtensionArray
1846
+
1847
+ Examples
1848
+ --------
1849
+ >>> arr1 = pd.array([1, 2, 3])
1850
+ >>> arr2 = pd.array([4, 5, 6])
1851
+ >>> pd.arrays.IntegerArray._concat_same_type([arr1, arr2])
1852
+ <IntegerArray>
1853
+ [1, 2, 3, 4, 5, 6]
1854
+ Length: 6, dtype: Int64
1855
+ """
1856
+ # Implementer note: this method will only be called with a sequence of
1857
+ # ExtensionArrays of this class and with the same dtype as self. This
1858
+ # should allow "easy" concatenation (no upcasting needed), and result
1859
+ # in a new ExtensionArray of the same dtype.
1860
+ # Note: this strict behaviour is only guaranteed starting with pandas 1.1
1861
+ raise AbstractMethodError(cls)
1862
+
1863
+ # The _can_hold_na attribute is set to True so that pandas internals
1864
+ # will use the ExtensionDtype.na_value as the NA value in operations
1865
+ # such as take(), reindex(), shift(), etc. In addition, those results
1866
+ # will then be of the ExtensionArray subclass rather than an array
1867
+ # of objects
1868
+ @cache_readonly
1869
+ def _can_hold_na(self) -> bool:
1870
+ return self.dtype._can_hold_na
1871
+
1872
+ def _accumulate(
1873
+ self, name: str, *, skipna: bool = True, **kwargs
1874
+ ) -> ExtensionArray:
1875
+ """
1876
+ Return an ExtensionArray performing an accumulation operation.
1877
+
1878
+ The underlying data type might change.
1879
+
1880
+ Parameters
1881
+ ----------
1882
+ name : str
1883
+ Name of the function, supported values are:
1884
+ - cummin
1885
+ - cummax
1886
+ - cumsum
1887
+ - cumprod
1888
+ skipna : bool, default True
1889
+ If True, skip NA values.
1890
+ **kwargs
1891
+ Additional keyword arguments passed to the accumulation function.
1892
+ Currently, there is no supported kwarg.
1893
+
1894
+ Returns
1895
+ -------
1896
+ array
1897
+
1898
+ Raises
1899
+ ------
1900
+ NotImplementedError : subclass does not define accumulations
1901
+
1902
+ Examples
1903
+ --------
1904
+ >>> arr = pd.array([1, 2, 3])
1905
+ >>> arr._accumulate(name='cumsum')
1906
+ <IntegerArray>
1907
+ [1, 3, 6]
1908
+ Length: 3, dtype: Int64
1909
+ """
1910
+ raise NotImplementedError(f"cannot perform {name} with type {self.dtype}")
1911
+
1912
+ def _reduce(
1913
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
1914
+ ):
1915
+ """
1916
+ Return a scalar result of performing the reduction operation.
1917
+
1918
+ Parameters
1919
+ ----------
1920
+ name : str
1921
+ Name of the function, supported values are:
1922
+ { any, all, min, max, sum, mean, median, prod,
1923
+ std, var, sem, kurt, skew }.
1924
+ skipna : bool, default True
1925
+ If True, skip NaN values.
1926
+ keepdims : bool, default False
1927
+ If False, a scalar is returned.
1928
+ If True, the result has dimension with size one along the reduced axis.
1929
+
1930
+ .. versionadded:: 2.1
1931
+
1932
+ This parameter is not required in the _reduce signature to keep backward
1933
+ compatibility, but will become required in the future. If the parameter
1934
+ is not found in the method signature, a FutureWarning will be emitted.
1935
+ **kwargs
1936
+ Additional keyword arguments passed to the reduction function.
1937
+ Currently, `ddof` is the only supported kwarg.
1938
+
1939
+ Returns
1940
+ -------
1941
+ scalar
1942
+
1943
+ Raises
1944
+ ------
1945
+ TypeError : subclass does not define reductions
1946
+
1947
+ Examples
1948
+ --------
1949
+ >>> pd.array([1, 2, 3])._reduce("min")
1950
+ 1
1951
+ """
1952
+ meth = getattr(self, name, None)
1953
+ if meth is None:
1954
+ raise TypeError(
1955
+ f"'{type(self).__name__}' with dtype {self.dtype} "
1956
+ f"does not support reduction '{name}'"
1957
+ )
1958
+ result = meth(skipna=skipna, **kwargs)
1959
+ if keepdims:
1960
+ result = np.array([result])
1961
+
1962
+ return result
1963
+
1964
+ # https://github.com/python/typeshed/issues/2148#issuecomment-520783318
1965
+ # Incompatible types in assignment (expression has type "None", base class
1966
+ # "object" defined the type as "Callable[[object], int]")
1967
+ __hash__: ClassVar[None] # type: ignore[assignment]
1968
+
1969
+ # ------------------------------------------------------------------------
1970
+ # Non-Optimized Default Methods; in the case of the private methods here,
1971
+ # these are not guaranteed to be stable across pandas versions.
1972
+
1973
+ def _values_for_json(self) -> np.ndarray:
1974
+ """
1975
+ Specify how to render our entries in to_json.
1976
+
1977
+ Notes
1978
+ -----
1979
+ The dtype on the returned ndarray is not restricted, but for non-native
1980
+ types that are not specifically handled in objToJSON.c, to_json is
1981
+ liable to raise. In these cases, it may be safer to return an ndarray
1982
+ of strings.
1983
+ """
1984
+ return np.asarray(self)
1985
+
1986
+ def _hash_pandas_object(
1987
+ self, *, encoding: str, hash_key: str, categorize: bool
1988
+ ) -> npt.NDArray[np.uint64]:
1989
+ """
1990
+ Hook for hash_pandas_object.
1991
+
1992
+ Default is to use the values returned by _values_for_factorize.
1993
+
1994
+ Parameters
1995
+ ----------
1996
+ encoding : str
1997
+ Encoding for data & key when strings.
1998
+ hash_key : str
1999
+ Hash_key for string key to encode.
2000
+ categorize : bool
2001
+ Whether to first categorize object arrays before hashing. This is more
2002
+ efficient when the array contains duplicate values.
2003
+
2004
+ Returns
2005
+ -------
2006
+ np.ndarray[uint64]
2007
+
2008
+ Examples
2009
+ --------
2010
+ >>> pd.array([1, 2])._hash_pandas_object(encoding='utf-8',
2011
+ ... hash_key="1000000000000000",
2012
+ ... categorize=False
2013
+ ... )
2014
+ array([ 6238072747940578789, 15839785061582574730], dtype=uint64)
2015
+ """
2016
+ from pandas.core.util.hashing import hash_array
2017
+
2018
+ values, _ = self._values_for_factorize()
2019
+ return hash_array(
2020
+ values, encoding=encoding, hash_key=hash_key, categorize=categorize
2021
+ )
2022
+
2023
+ def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]:
2024
+ """
2025
+ Transform each element of list-like to a row.
2026
+
2027
+ For arrays that do not contain list-like elements the default
2028
+ implementation of this method just returns a copy and an array
2029
+ of ones (unchanged index).
2030
+
2031
+ Returns
2032
+ -------
2033
+ ExtensionArray
2034
+ Array with the exploded values.
2035
+ np.ndarray[uint64]
2036
+ The original lengths of each list-like for determining the
2037
+ resulting index.
2038
+
2039
+ See Also
2040
+ --------
2041
+ Series.explode : The method on the ``Series`` object that this
2042
+ extension array method is meant to support.
2043
+
2044
+ Examples
2045
+ --------
2046
+ >>> import pyarrow as pa
2047
+ >>> a = pd.array([[1, 2, 3], [4], [5, 6]],
2048
+ ... dtype=pd.ArrowDtype(pa.list_(pa.int64())))
2049
+ >>> a._explode()
2050
+ (<ArrowExtensionArray>
2051
+ [1, 2, 3, 4, 5, 6]
2052
+ Length: 6, dtype: int64[pyarrow], array([3, 1, 2], dtype=int32))
2053
+ """
2054
+ values = self.copy()
2055
+ counts = np.ones(shape=(len(self),), dtype=np.uint64)
2056
+ return values, counts
2057
+
2058
+ def tolist(self) -> list:
2059
+ """
2060
+ Return a list of the values.
2061
+
2062
+ These are each a scalar type, which is a Python scalar
2063
+ (for str, int, float) or a pandas scalar
2064
+ (for Timestamp/Timedelta/Interval/Period)
2065
+
2066
+ Returns
2067
+ -------
2068
+ list
2069
+
2070
+ Examples
2071
+ --------
2072
+ >>> arr = pd.array([1, 2, 3])
2073
+ >>> arr.tolist()
2074
+ [1, 2, 3]
2075
+ """
2076
+ if self.ndim > 1:
2077
+ return [x.tolist() for x in self]
2078
+ return list(self)
2079
+
2080
+ def delete(self, loc: PositionalIndexer) -> Self:
2081
+ indexer = np.delete(np.arange(len(self)), loc)
2082
+ return self.take(indexer)
2083
+
2084
+ def insert(self, loc: int, item) -> Self:
2085
+ """
2086
+ Insert an item at the given position.
2087
+
2088
+ Parameters
2089
+ ----------
2090
+ loc : int
2091
+ item : scalar-like
2092
+
2093
+ Returns
2094
+ -------
2095
+ same type as self
2096
+
2097
+ Notes
2098
+ -----
2099
+ This method should be both type and dtype-preserving. If the item
2100
+ cannot be held in an array of this type/dtype, either ValueError or
2101
+ TypeError should be raised.
2102
+
2103
+ The default implementation relies on _from_sequence to raise on invalid
2104
+ items.
2105
+
2106
+ Examples
2107
+ --------
2108
+ >>> arr = pd.array([1, 2, 3])
2109
+ >>> arr.insert(2, -1)
2110
+ <IntegerArray>
2111
+ [1, 2, -1, 3]
2112
+ Length: 4, dtype: Int64
2113
+ """
2114
+ loc = validate_insert_loc(loc, len(self))
2115
+
2116
+ item_arr = type(self)._from_sequence([item], dtype=self.dtype)
2117
+
2118
+ return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
2119
+
2120
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
2121
+ """
2122
+ Analogue to np.putmask(self, mask, value)
2123
+
2124
+ Parameters
2125
+ ----------
2126
+ mask : np.ndarray[bool]
2127
+ value : scalar or listlike
2128
+ If listlike, must be arraylike with same length as self.
2129
+
2130
+ Returns
2131
+ -------
2132
+ None
2133
+
2134
+ Notes
2135
+ -----
2136
+ Unlike np.putmask, we do not repeat listlike values with mismatched length.
2137
+ 'value' should either be a scalar or an arraylike with the same length
2138
+ as self.
2139
+ """
2140
+ if is_list_like(value):
2141
+ val = value[mask]
2142
+ else:
2143
+ val = value
2144
+
2145
+ self[mask] = val
2146
+
2147
+ def _where(self, mask: npt.NDArray[np.bool_], value) -> Self:
2148
+ """
2149
+ Analogue to np.where(mask, self, value)
2150
+
2151
+ Parameters
2152
+ ----------
2153
+ mask : np.ndarray[bool]
2154
+ value : scalar or listlike
2155
+
2156
+ Returns
2157
+ -------
2158
+ same type as self
2159
+ """
2160
+ result = self.copy()
2161
+
2162
+ if is_list_like(value):
2163
+ val = value[~mask]
2164
+ else:
2165
+ val = value
2166
+
2167
+ result[~mask] = val
2168
+ return result
2169
+
2170
+ # TODO(3.0): this can be removed once GH#33302 deprecation is enforced
2171
+ def _fill_mask_inplace(
2172
+ self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
2173
+ ) -> None:
2174
+ """
2175
+ Replace values in locations specified by 'mask' using pad or backfill.
2176
+
2177
+ See also
2178
+ --------
2179
+ ExtensionArray.fillna
2180
+ """
2181
+ func = missing.get_fill_func(method)
2182
+ npvalues = self.astype(object)
2183
+ # NB: if we don't copy mask here, it may be altered inplace, which
2184
+ # would mess up the `self[mask] = ...` below.
2185
+ func(npvalues, limit=limit, mask=mask.copy())
2186
+ new_values = self._from_sequence(npvalues, dtype=self.dtype)
2187
+ self[mask] = new_values[mask]
2188
+
2189
+ def _rank(
2190
+ self,
2191
+ *,
2192
+ axis: AxisInt = 0,
2193
+ method: str = "average",
2194
+ na_option: str = "keep",
2195
+ ascending: bool = True,
2196
+ pct: bool = False,
2197
+ ):
2198
+ """
2199
+ See Series.rank.__doc__.
2200
+ """
2201
+ if axis != 0:
2202
+ raise NotImplementedError
2203
+
2204
+ return rank(
2205
+ self._values_for_argsort(),
2206
+ axis=axis,
2207
+ method=method,
2208
+ na_option=na_option,
2209
+ ascending=ascending,
2210
+ pct=pct,
2211
+ )
2212
+
2213
+ @classmethod
2214
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype):
2215
+ """
2216
+ Create an ExtensionArray with the given shape and dtype.
2217
+
2218
+ See also
2219
+ --------
2220
+ ExtensionDtype.empty
2221
+ ExtensionDtype.empty is the 'official' public version of this API.
2222
+ """
2223
+ # Implementer note: while ExtensionDtype.empty is the public way to
2224
+ # call this method, it is still required to implement this `_empty`
2225
+ # method as well (it is called internally in pandas)
2226
+ obj = cls._from_sequence([], dtype=dtype)
2227
+
2228
+ taker = np.broadcast_to(np.intp(-1), shape)
2229
+ result = obj.take(taker, allow_fill=True)
2230
+ if not isinstance(result, cls) or dtype != result.dtype:
2231
+ raise NotImplementedError(
2232
+ f"Default 'empty' implementation is invalid for dtype='{dtype}'"
2233
+ )
2234
+ return result
2235
+
2236
+ def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:
2237
+ """
2238
+ Compute the quantiles of self for each quantile in `qs`.
2239
+
2240
+ Parameters
2241
+ ----------
2242
+ qs : np.ndarray[float64]
2243
+ interpolation: str
2244
+
2245
+ Returns
2246
+ -------
2247
+ same type as self
2248
+ """
2249
+ mask = np.asarray(self.isna())
2250
+ arr = np.asarray(self)
2251
+ fill_value = np.nan
2252
+
2253
+ res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
2254
+ return type(self)._from_sequence(res_values)
2255
+
2256
+ def _mode(self, dropna: bool = True) -> Self:
2257
+ """
2258
+ Returns the mode(s) of the ExtensionArray.
2259
+
2260
+ Always returns `ExtensionArray` even if only one value.
2261
+
2262
+ Parameters
2263
+ ----------
2264
+ dropna : bool, default True
2265
+ Don't consider counts of NA values.
2266
+
2267
+ Returns
2268
+ -------
2269
+ same type as self
2270
+ Sorted, if possible.
2271
+ """
2272
+ # error: Incompatible return value type (got "Union[ExtensionArray,
2273
+ # ndarray[Any, Any]]", expected "Self")
2274
+ return mode(self, dropna=dropna) # type: ignore[return-value]
2275
+
2276
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
2277
+ if any(
2278
+ isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs
2279
+ ):
2280
+ return NotImplemented
2281
+
2282
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
2283
+ self, ufunc, method, *inputs, **kwargs
2284
+ )
2285
+ if result is not NotImplemented:
2286
+ return result
2287
+
2288
+ if "out" in kwargs:
2289
+ return arraylike.dispatch_ufunc_with_out(
2290
+ self, ufunc, method, *inputs, **kwargs
2291
+ )
2292
+
2293
+ if method == "reduce":
2294
+ result = arraylike.dispatch_reduction_ufunc(
2295
+ self, ufunc, method, *inputs, **kwargs
2296
+ )
2297
+ if result is not NotImplemented:
2298
+ return result
2299
+
2300
+ return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs)
2301
+
2302
+ def map(self, mapper, na_action=None):
2303
+ """
2304
+ Map values using an input mapping or function.
2305
+
2306
+ Parameters
2307
+ ----------
2308
+ mapper : function, dict, or Series
2309
+ Mapping correspondence.
2310
+ na_action : {None, 'ignore'}, default None
2311
+ If 'ignore', propagate NA values, without passing them to the
2312
+ mapping correspondence. If 'ignore' is not supported, a
2313
+ ``NotImplementedError`` should be raised.
2314
+
2315
+ Returns
2316
+ -------
2317
+ Union[ndarray, Index, ExtensionArray]
2318
+ The output of the mapping function applied to the array.
2319
+ If the function returns a tuple with more than one element
2320
+ a MultiIndex will be returned.
2321
+ """
2322
+ return map_array(self, mapper, na_action=na_action)
2323
+
2324
+ # ------------------------------------------------------------------------
2325
+ # GroupBy Methods
2326
+
2327
+ def _groupby_op(
2328
+ self,
2329
+ *,
2330
+ how: str,
2331
+ has_dropped_na: bool,
2332
+ min_count: int,
2333
+ ngroups: int,
2334
+ ids: npt.NDArray[np.intp],
2335
+ **kwargs,
2336
+ ) -> ArrayLike:
2337
+ """
2338
+ Dispatch GroupBy reduction or transformation operation.
2339
+
2340
+ This is an *experimental* API to allow ExtensionArray authors to implement
2341
+ reductions and transformations. The API is subject to change.
2342
+
2343
+ Parameters
2344
+ ----------
2345
+ how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median',
2346
+ 'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc',
2347
+ 'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'}
2348
+ has_dropped_na : bool
2349
+ min_count : int
2350
+ ngroups : int
2351
+ ids : np.ndarray[np.intp]
2352
+ ids[i] gives the integer label for the group that self[i] belongs to.
2353
+ **kwargs : operation-specific
2354
+ 'any', 'all' -> ['skipna']
2355
+ 'var', 'std', 'sem' -> ['ddof']
2356
+ 'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna']
2357
+ 'rank' -> ['ties_method', 'ascending', 'na_option', 'pct']
2358
+
2359
+ Returns
2360
+ -------
2361
+ np.ndarray or ExtensionArray
2362
+ """
2363
+ from pandas.core.arrays.string_ import StringDtype
2364
+ from pandas.core.groupby.ops import WrappedCythonOp
2365
+
2366
+ kind = WrappedCythonOp.get_kind_from_how(how)
2367
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
2368
+
2369
+ # GH#43682
2370
+ if isinstance(self.dtype, StringDtype):
2371
+ # StringArray
2372
+ if op.how not in ["any", "all"]:
2373
+ # Fail early to avoid conversion to object
2374
+ op._get_cython_function(op.kind, op.how, np.dtype(object), False)
2375
+ npvalues = self.to_numpy(object, na_value=np.nan)
2376
+ else:
2377
+ raise NotImplementedError(
2378
+ f"function is not implemented for this dtype: {self.dtype}"
2379
+ )
2380
+
2381
+ res_values = op._cython_op_ndim_compat(
2382
+ npvalues,
2383
+ min_count=min_count,
2384
+ ngroups=ngroups,
2385
+ comp_ids=ids,
2386
+ mask=None,
2387
+ **kwargs,
2388
+ )
2389
+
2390
+ if op.how in op.cast_blocklist:
2391
+ # i.e. how in ["rank"], since other cast_blocklist methods don't go
2392
+ # through cython_operation
2393
+ return res_values
2394
+
2395
+ if isinstance(self.dtype, StringDtype):
2396
+ dtype = self.dtype
2397
+ string_array_cls = dtype.construct_array_type()
2398
+ return string_array_cls._from_sequence(res_values, dtype=dtype)
2399
+
2400
+ else:
2401
+ raise NotImplementedError
2402
+
2403
+
2404
+ class ExtensionArraySupportsAnyAll(ExtensionArray):
2405
+ def any(self, *, skipna: bool = True) -> bool:
2406
+ raise AbstractMethodError(self)
2407
+
2408
+ def all(self, *, skipna: bool = True) -> bool:
2409
+ raise AbstractMethodError(self)
2410
+
2411
+
2412
+ class ExtensionOpsMixin:
2413
+ """
2414
+ A base class for linking the operators to their dunder names.
2415
+
2416
+ .. note::
2417
+
2418
+ You may want to set ``__array_priority__`` if you want your
2419
+ implementation to be called when involved in binary operations
2420
+ with NumPy arrays.
2421
+ """
2422
+
2423
+ @classmethod
2424
+ def _create_arithmetic_method(cls, op):
2425
+ raise AbstractMethodError(cls)
2426
+
2427
+ @classmethod
2428
+ def _add_arithmetic_ops(cls) -> None:
2429
+ setattr(cls, "__add__", cls._create_arithmetic_method(operator.add))
2430
+ setattr(cls, "__radd__", cls._create_arithmetic_method(roperator.radd))
2431
+ setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub))
2432
+ setattr(cls, "__rsub__", cls._create_arithmetic_method(roperator.rsub))
2433
+ setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul))
2434
+ setattr(cls, "__rmul__", cls._create_arithmetic_method(roperator.rmul))
2435
+ setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow))
2436
+ setattr(cls, "__rpow__", cls._create_arithmetic_method(roperator.rpow))
2437
+ setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod))
2438
+ setattr(cls, "__rmod__", cls._create_arithmetic_method(roperator.rmod))
2439
+ setattr(cls, "__floordiv__", cls._create_arithmetic_method(operator.floordiv))
2440
+ setattr(
2441
+ cls, "__rfloordiv__", cls._create_arithmetic_method(roperator.rfloordiv)
2442
+ )
2443
+ setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv))
2444
+ setattr(cls, "__rtruediv__", cls._create_arithmetic_method(roperator.rtruediv))
2445
+ setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod))
2446
+ setattr(cls, "__rdivmod__", cls._create_arithmetic_method(roperator.rdivmod))
2447
+
2448
+ @classmethod
2449
+ def _create_comparison_method(cls, op):
2450
+ raise AbstractMethodError(cls)
2451
+
2452
+ @classmethod
2453
+ def _add_comparison_ops(cls) -> None:
2454
+ setattr(cls, "__eq__", cls._create_comparison_method(operator.eq))
2455
+ setattr(cls, "__ne__", cls._create_comparison_method(operator.ne))
2456
+ setattr(cls, "__lt__", cls._create_comparison_method(operator.lt))
2457
+ setattr(cls, "__gt__", cls._create_comparison_method(operator.gt))
2458
+ setattr(cls, "__le__", cls._create_comparison_method(operator.le))
2459
+ setattr(cls, "__ge__", cls._create_comparison_method(operator.ge))
2460
+
2461
+ @classmethod
2462
+ def _create_logical_method(cls, op):
2463
+ raise AbstractMethodError(cls)
2464
+
2465
+ @classmethod
2466
+ def _add_logical_ops(cls) -> None:
2467
+ setattr(cls, "__and__", cls._create_logical_method(operator.and_))
2468
+ setattr(cls, "__rand__", cls._create_logical_method(roperator.rand_))
2469
+ setattr(cls, "__or__", cls._create_logical_method(operator.or_))
2470
+ setattr(cls, "__ror__", cls._create_logical_method(roperator.ror_))
2471
+ setattr(cls, "__xor__", cls._create_logical_method(operator.xor))
2472
+ setattr(cls, "__rxor__", cls._create_logical_method(roperator.rxor))
2473
+
2474
+
2475
+ class ExtensionScalarOpsMixin(ExtensionOpsMixin):
2476
+ """
2477
+ A mixin for defining ops on an ExtensionArray.
2478
+
2479
+ It is assumed that the underlying scalar objects have the operators
2480
+ already defined.
2481
+
2482
+ Notes
2483
+ -----
2484
+ If you have defined a subclass MyExtensionArray(ExtensionArray), then
2485
+ use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
2486
+ get the arithmetic operators. After the definition of MyExtensionArray,
2487
+ insert the lines
2488
+
2489
+ MyExtensionArray._add_arithmetic_ops()
2490
+ MyExtensionArray._add_comparison_ops()
2491
+
2492
+ to link the operators to your class.
2493
+
2494
+ .. note::
2495
+
2496
+ You may want to set ``__array_priority__`` if you want your
2497
+ implementation to be called when involved in binary operations
2498
+ with NumPy arrays.
2499
+ """
2500
+
2501
+ @classmethod
2502
+ def _create_method(cls, op, coerce_to_dtype: bool = True, result_dtype=None):
2503
+ """
2504
+ A class method that returns a method that will correspond to an
2505
+ operator for an ExtensionArray subclass, by dispatching to the
2506
+ relevant operator defined on the individual elements of the
2507
+ ExtensionArray.
2508
+
2509
+ Parameters
2510
+ ----------
2511
+ op : function
2512
+ An operator that takes arguments op(a, b)
2513
+ coerce_to_dtype : bool, default True
2514
+ boolean indicating whether to attempt to convert
2515
+ the result to the underlying ExtensionArray dtype.
2516
+ If it's not possible to create a new ExtensionArray with the
2517
+ values, an ndarray is returned instead.
2518
+
2519
+ Returns
2520
+ -------
2521
+ Callable[[Any, Any], Union[ndarray, ExtensionArray]]
2522
+ A method that can be bound to a class. When used, the method
2523
+ receives the two arguments, one of which is the instance of
2524
+ this class, and should return an ExtensionArray or an ndarray.
2525
+
2526
+ Returning an ndarray may be necessary when the result of the
2527
+ `op` cannot be stored in the ExtensionArray. The dtype of the
2528
+ ndarray uses NumPy's normal inference rules.
2529
+
2530
+ Examples
2531
+ --------
2532
+ Given an ExtensionArray subclass called MyExtensionArray, use
2533
+
2534
+ __add__ = cls._create_method(operator.add)
2535
+
2536
+ in the class definition of MyExtensionArray to create the operator
2537
+ for addition, that will be based on the operator implementation
2538
+ of the underlying elements of the ExtensionArray
2539
+ """
2540
+
2541
+ def _binop(self, other):
2542
+ def convert_values(param):
2543
+ if isinstance(param, ExtensionArray) or is_list_like(param):
2544
+ ovalues = param
2545
+ else: # Assume its an object
2546
+ ovalues = [param] * len(self)
2547
+ return ovalues
2548
+
2549
+ if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)):
2550
+ # rely on pandas to unbox and dispatch to us
2551
+ return NotImplemented
2552
+
2553
+ lvalues = self
2554
+ rvalues = convert_values(other)
2555
+
2556
+ # If the operator is not defined for the underlying objects,
2557
+ # a TypeError should be raised
2558
+ res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
2559
+
2560
+ def _maybe_convert(arr):
2561
+ if coerce_to_dtype:
2562
+ # https://github.com/pandas-dev/pandas/issues/22850
2563
+ # We catch all regular exceptions here, and fall back
2564
+ # to an ndarray.
2565
+ res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False)
2566
+ if not isinstance(res, type(self)):
2567
+ # exception raised in _from_sequence; ensure we have ndarray
2568
+ res = np.asarray(arr)
2569
+ else:
2570
+ res = np.asarray(arr, dtype=result_dtype)
2571
+ return res
2572
+
2573
+ if op.__name__ in {"divmod", "rdivmod"}:
2574
+ a, b = zip(*res)
2575
+ return _maybe_convert(a), _maybe_convert(b)
2576
+
2577
+ return _maybe_convert(res)
2578
+
2579
+ op_name = f"__{op.__name__}__"
2580
+ return set_function_name(_binop, op_name, cls)
2581
+
2582
+ @classmethod
2583
+ def _create_arithmetic_method(cls, op):
2584
+ return cls._create_method(op)
2585
+
2586
+ @classmethod
2587
+ def _create_comparison_method(cls, op):
2588
+ return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool)
videollama2/lib/python3.10/site-packages/pandas/core/arrays/boolean.py ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import numbers
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ ClassVar,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import (
13
+ lib,
14
+ missing as libmissing,
15
+ )
16
+
17
+ from pandas.core.dtypes.common import is_list_like
18
+ from pandas.core.dtypes.dtypes import register_extension_dtype
19
+ from pandas.core.dtypes.missing import isna
20
+
21
+ from pandas.core import ops
22
+ from pandas.core.array_algos import masked_accumulations
23
+ from pandas.core.arrays.masked import (
24
+ BaseMaskedArray,
25
+ BaseMaskedDtype,
26
+ )
27
+
28
+ if TYPE_CHECKING:
29
+ import pyarrow
30
+
31
+ from pandas._typing import (
32
+ Dtype,
33
+ DtypeObj,
34
+ Self,
35
+ npt,
36
+ type_t,
37
+ )
38
+
39
+
40
+ @register_extension_dtype
41
+ class BooleanDtype(BaseMaskedDtype):
42
+ """
43
+ Extension dtype for boolean data.
44
+
45
+ .. warning::
46
+
47
+ BooleanDtype is considered experimental. The implementation and
48
+ parts of the API may change without warning.
49
+
50
+ Attributes
51
+ ----------
52
+ None
53
+
54
+ Methods
55
+ -------
56
+ None
57
+
58
+ Examples
59
+ --------
60
+ >>> pd.BooleanDtype()
61
+ BooleanDtype
62
+ """
63
+
64
+ name: ClassVar[str] = "boolean"
65
+
66
+ # https://github.com/python/mypy/issues/4125
67
+ # error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
68
+ @property
69
+ def type(self) -> type: # type: ignore[override]
70
+ return np.bool_
71
+
72
+ @property
73
+ def kind(self) -> str:
74
+ return "b"
75
+
76
+ @property
77
+ def numpy_dtype(self) -> np.dtype:
78
+ return np.dtype("bool")
79
+
80
+ @classmethod
81
+ def construct_array_type(cls) -> type_t[BooleanArray]:
82
+ """
83
+ Return the array type associated with this dtype.
84
+
85
+ Returns
86
+ -------
87
+ type
88
+ """
89
+ return BooleanArray
90
+
91
+ def __repr__(self) -> str:
92
+ return "BooleanDtype"
93
+
94
+ @property
95
+ def _is_boolean(self) -> bool:
96
+ return True
97
+
98
+ @property
99
+ def _is_numeric(self) -> bool:
100
+ return True
101
+
102
+ def __from_arrow__(
103
+ self, array: pyarrow.Array | pyarrow.ChunkedArray
104
+ ) -> BooleanArray:
105
+ """
106
+ Construct BooleanArray from pyarrow Array/ChunkedArray.
107
+ """
108
+ import pyarrow
109
+
110
+ if array.type != pyarrow.bool_() and not pyarrow.types.is_null(array.type):
111
+ raise TypeError(f"Expected array of boolean type, got {array.type} instead")
112
+
113
+ if isinstance(array, pyarrow.Array):
114
+ chunks = [array]
115
+ length = len(array)
116
+ else:
117
+ # pyarrow.ChunkedArray
118
+ chunks = array.chunks
119
+ length = array.length()
120
+
121
+ if pyarrow.types.is_null(array.type):
122
+ mask = np.ones(length, dtype=bool)
123
+ # No need to init data, since all null
124
+ data = np.empty(length, dtype=bool)
125
+ return BooleanArray(data, mask)
126
+
127
+ results = []
128
+ for arr in chunks:
129
+ buflist = arr.buffers()
130
+ data = pyarrow.BooleanArray.from_buffers(
131
+ arr.type, len(arr), [None, buflist[1]], offset=arr.offset
132
+ ).to_numpy(zero_copy_only=False)
133
+ if arr.null_count != 0:
134
+ mask = pyarrow.BooleanArray.from_buffers(
135
+ arr.type, len(arr), [None, buflist[0]], offset=arr.offset
136
+ ).to_numpy(zero_copy_only=False)
137
+ mask = ~mask
138
+ else:
139
+ mask = np.zeros(len(arr), dtype=bool)
140
+
141
+ bool_arr = BooleanArray(data, mask)
142
+ results.append(bool_arr)
143
+
144
+ if not results:
145
+ return BooleanArray(
146
+ np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)
147
+ )
148
+ else:
149
+ return BooleanArray._concat_same_type(results)
150
+
151
+
152
+ def coerce_to_array(
153
+ values, mask=None, copy: bool = False
154
+ ) -> tuple[np.ndarray, np.ndarray]:
155
+ """
156
+ Coerce the input values array to numpy arrays with a mask.
157
+
158
+ Parameters
159
+ ----------
160
+ values : 1D list-like
161
+ mask : bool 1D array, optional
162
+ copy : bool, default False
163
+ if True, copy the input
164
+
165
+ Returns
166
+ -------
167
+ tuple of (values, mask)
168
+ """
169
+ if isinstance(values, BooleanArray):
170
+ if mask is not None:
171
+ raise ValueError("cannot pass mask for BooleanArray input")
172
+ values, mask = values._data, values._mask
173
+ if copy:
174
+ values = values.copy()
175
+ mask = mask.copy()
176
+ return values, mask
177
+
178
+ mask_values = None
179
+ if isinstance(values, np.ndarray) and values.dtype == np.bool_:
180
+ if copy:
181
+ values = values.copy()
182
+ elif isinstance(values, np.ndarray) and values.dtype.kind in "iufcb":
183
+ mask_values = isna(values)
184
+
185
+ values_bool = np.zeros(len(values), dtype=bool)
186
+ values_bool[~mask_values] = values[~mask_values].astype(bool)
187
+
188
+ if not np.all(
189
+ values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
190
+ ):
191
+ raise TypeError("Need to pass bool-like values")
192
+
193
+ values = values_bool
194
+ else:
195
+ values_object = np.asarray(values, dtype=object)
196
+
197
+ inferred_dtype = lib.infer_dtype(values_object, skipna=True)
198
+ integer_like = ("floating", "integer", "mixed-integer-float")
199
+ if inferred_dtype not in ("boolean", "empty") + integer_like:
200
+ raise TypeError("Need to pass bool-like values")
201
+
202
+ # mypy does not narrow the type of mask_values to npt.NDArray[np.bool_]
203
+ # within this branch, it assumes it can also be None
204
+ mask_values = cast("npt.NDArray[np.bool_]", isna(values_object))
205
+ values = np.zeros(len(values), dtype=bool)
206
+ values[~mask_values] = values_object[~mask_values].astype(bool)
207
+
208
+ # if the values were integer-like, validate it were actually 0/1's
209
+ if (inferred_dtype in integer_like) and not (
210
+ np.all(
211
+ values[~mask_values].astype(float)
212
+ == values_object[~mask_values].astype(float)
213
+ )
214
+ ):
215
+ raise TypeError("Need to pass bool-like values")
216
+
217
+ if mask is None and mask_values is None:
218
+ mask = np.zeros(values.shape, dtype=bool)
219
+ elif mask is None:
220
+ mask = mask_values
221
+ else:
222
+ if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
223
+ if mask_values is not None:
224
+ mask = mask | mask_values
225
+ else:
226
+ if copy:
227
+ mask = mask.copy()
228
+ else:
229
+ mask = np.array(mask, dtype=bool)
230
+ if mask_values is not None:
231
+ mask = mask | mask_values
232
+
233
+ if values.shape != mask.shape:
234
+ raise ValueError("values.shape and mask.shape must match")
235
+
236
+ return values, mask
237
+
238
+
239
+ class BooleanArray(BaseMaskedArray):
240
+ """
241
+ Array of boolean (True/False) data with missing values.
242
+
243
+ This is a pandas Extension array for boolean data, under the hood
244
+ represented by 2 numpy arrays: a boolean array with the data and
245
+ a boolean array with the mask (True indicating missing).
246
+
247
+ BooleanArray implements Kleene logic (sometimes called three-value
248
+ logic) for logical operations. See :ref:`boolean.kleene` for more.
249
+
250
+ To construct an BooleanArray from generic array-like input, use
251
+ :func:`pandas.array` specifying ``dtype="boolean"`` (see examples
252
+ below).
253
+
254
+ .. warning::
255
+
256
+ BooleanArray is considered experimental. The implementation and
257
+ parts of the API may change without warning.
258
+
259
+ Parameters
260
+ ----------
261
+ values : numpy.ndarray
262
+ A 1-d boolean-dtype array with the data.
263
+ mask : numpy.ndarray
264
+ A 1-d boolean-dtype array indicating missing values (True
265
+ indicates missing).
266
+ copy : bool, default False
267
+ Whether to copy the `values` and `mask` arrays.
268
+
269
+ Attributes
270
+ ----------
271
+ None
272
+
273
+ Methods
274
+ -------
275
+ None
276
+
277
+ Returns
278
+ -------
279
+ BooleanArray
280
+
281
+ Examples
282
+ --------
283
+ Create an BooleanArray with :func:`pandas.array`:
284
+
285
+ >>> pd.array([True, False, None], dtype="boolean")
286
+ <BooleanArray>
287
+ [True, False, <NA>]
288
+ Length: 3, dtype: boolean
289
+ """
290
+
291
+ # The value used to fill '_data' to avoid upcasting
292
+ _internal_fill_value = False
293
+ # Fill values used for any/all
294
+ # Incompatible types in assignment (expression has type "bool", base class
295
+ # "BaseMaskedArray" defined the type as "<typing special form>")
296
+ _truthy_value = True # type: ignore[assignment]
297
+ _falsey_value = False # type: ignore[assignment]
298
+ _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
299
+ _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
300
+
301
+ @classmethod
302
+ def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
303
+ result = super()._simple_new(values, mask)
304
+ result._dtype = BooleanDtype()
305
+ return result
306
+
307
+ def __init__(
308
+ self, values: np.ndarray, mask: np.ndarray, copy: bool = False
309
+ ) -> None:
310
+ if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
311
+ raise TypeError(
312
+ "values should be boolean numpy array. Use "
313
+ "the 'pd.array' function instead"
314
+ )
315
+ self._dtype = BooleanDtype()
316
+ super().__init__(values, mask, copy=copy)
317
+
318
+ @property
319
+ def dtype(self) -> BooleanDtype:
320
+ return self._dtype
321
+
322
+ @classmethod
323
+ def _from_sequence_of_strings(
324
+ cls,
325
+ strings: list[str],
326
+ *,
327
+ dtype: Dtype | None = None,
328
+ copy: bool = False,
329
+ true_values: list[str] | None = None,
330
+ false_values: list[str] | None = None,
331
+ ) -> BooleanArray:
332
+ true_values_union = cls._TRUE_VALUES.union(true_values or [])
333
+ false_values_union = cls._FALSE_VALUES.union(false_values or [])
334
+
335
+ def map_string(s) -> bool:
336
+ if s in true_values_union:
337
+ return True
338
+ elif s in false_values_union:
339
+ return False
340
+ else:
341
+ raise ValueError(f"{s} cannot be cast to bool")
342
+
343
+ scalars = np.array(strings, dtype=object)
344
+ mask = isna(scalars)
345
+ scalars[~mask] = list(map(map_string, scalars[~mask]))
346
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
347
+
348
+ _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
349
+
350
+ @classmethod
351
+ def _coerce_to_array(
352
+ cls, value, *, dtype: DtypeObj, copy: bool = False
353
+ ) -> tuple[np.ndarray, np.ndarray]:
354
+ if dtype:
355
+ assert dtype == "boolean"
356
+ return coerce_to_array(value, copy=copy)
357
+
358
+ def _logical_method(self, other, op):
359
+ assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
360
+ other_is_scalar = lib.is_scalar(other)
361
+ mask = None
362
+
363
+ if isinstance(other, BooleanArray):
364
+ other, mask = other._data, other._mask
365
+ elif is_list_like(other):
366
+ other = np.asarray(other, dtype="bool")
367
+ if other.ndim > 1:
368
+ raise NotImplementedError("can only perform ops with 1-d structures")
369
+ other, mask = coerce_to_array(other, copy=False)
370
+ elif isinstance(other, np.bool_):
371
+ other = other.item()
372
+
373
+ if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
374
+ raise TypeError(
375
+ "'other' should be pandas.NA or a bool. "
376
+ f"Got {type(other).__name__} instead."
377
+ )
378
+
379
+ if not other_is_scalar and len(self) != len(other):
380
+ raise ValueError("Lengths must match")
381
+
382
+ if op.__name__ in {"or_", "ror_"}:
383
+ result, mask = ops.kleene_or(self._data, other, self._mask, mask)
384
+ elif op.__name__ in {"and_", "rand_"}:
385
+ result, mask = ops.kleene_and(self._data, other, self._mask, mask)
386
+ else:
387
+ # i.e. xor, rxor
388
+ result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
389
+
390
+ # i.e. BooleanArray
391
+ return self._maybe_mask_result(result, mask)
392
+
393
+ def _accumulate(
394
+ self, name: str, *, skipna: bool = True, **kwargs
395
+ ) -> BaseMaskedArray:
396
+ data = self._data
397
+ mask = self._mask
398
+ if name in ("cummin", "cummax"):
399
+ op = getattr(masked_accumulations, name)
400
+ data, mask = op(data, mask, skipna=skipna, **kwargs)
401
+ return self._simple_new(data, mask)
402
+ else:
403
+ from pandas.core.arrays import IntegerArray
404
+
405
+ return IntegerArray(data.astype(int), mask)._accumulate(
406
+ name, skipna=skipna, **kwargs
407
+ )
videollama2/lib/python3.10/site-packages/pandas/core/arrays/categorical.py ADDED
The diff for this file is too large to render. See raw diff
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/floating.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import ClassVar
4
+
5
+ import numpy as np
6
+
7
+ from pandas.core.dtypes.base import register_extension_dtype
8
+ from pandas.core.dtypes.common import is_float_dtype
9
+
10
+ from pandas.core.arrays.numeric import (
11
+ NumericArray,
12
+ NumericDtype,
13
+ )
14
+
15
+
16
+ class FloatingDtype(NumericDtype):
17
+ """
18
+ An ExtensionDtype to hold a single size of floating dtype.
19
+
20
+ These specific implementations are subclasses of the non-public
21
+ FloatingDtype. For example we have Float32Dtype to represent float32.
22
+
23
+ The attributes name & type are set when these subclasses are created.
24
+ """
25
+
26
+ _default_np_dtype = np.dtype(np.float64)
27
+ _checker = is_float_dtype
28
+
29
+ @classmethod
30
+ def construct_array_type(cls) -> type[FloatingArray]:
31
+ """
32
+ Return the array type associated with this dtype.
33
+
34
+ Returns
35
+ -------
36
+ type
37
+ """
38
+ return FloatingArray
39
+
40
+ @classmethod
41
+ def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]:
42
+ return NUMPY_FLOAT_TO_DTYPE
43
+
44
+ @classmethod
45
+ def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
46
+ """
47
+ Safely cast the values to the given dtype.
48
+
49
+ "safe" in this context means the casting is lossless.
50
+ """
51
+ # This is really only here for compatibility with IntegerDtype
52
+ # Here for compat with IntegerDtype
53
+ return values.astype(dtype, copy=copy)
54
+
55
+
56
+ class FloatingArray(NumericArray):
57
+ """
58
+ Array of floating (optional missing) values.
59
+
60
+ .. warning::
61
+
62
+ FloatingArray is currently experimental, and its API or internal
63
+ implementation may change without warning. Especially the behaviour
64
+ regarding NaN (distinct from NA missing values) is subject to change.
65
+
66
+ We represent a FloatingArray with 2 numpy arrays:
67
+
68
+ - data: contains a numpy float array of the appropriate dtype
69
+ - mask: a boolean array holding a mask on the data, True is missing
70
+
71
+ To construct an FloatingArray from generic array-like input, use
72
+ :func:`pandas.array` with one of the float dtypes (see examples).
73
+
74
+ See :ref:`integer_na` for more.
75
+
76
+ Parameters
77
+ ----------
78
+ values : numpy.ndarray
79
+ A 1-d float-dtype array.
80
+ mask : numpy.ndarray
81
+ A 1-d boolean-dtype array indicating missing values.
82
+ copy : bool, default False
83
+ Whether to copy the `values` and `mask`.
84
+
85
+ Attributes
86
+ ----------
87
+ None
88
+
89
+ Methods
90
+ -------
91
+ None
92
+
93
+ Returns
94
+ -------
95
+ FloatingArray
96
+
97
+ Examples
98
+ --------
99
+ Create an FloatingArray with :func:`pandas.array`:
100
+
101
+ >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype())
102
+ <FloatingArray>
103
+ [0.1, <NA>, 0.3]
104
+ Length: 3, dtype: Float32
105
+
106
+ String aliases for the dtypes are also available. They are capitalized.
107
+
108
+ >>> pd.array([0.1, None, 0.3], dtype="Float32")
109
+ <FloatingArray>
110
+ [0.1, <NA>, 0.3]
111
+ Length: 3, dtype: Float32
112
+ """
113
+
114
+ _dtype_cls = FloatingDtype
115
+
116
+ # The value used to fill '_data' to avoid upcasting
117
+ _internal_fill_value = np.nan
118
+ # Fill values used for any/all
119
+ # Incompatible types in assignment (expression has type "float", base class
120
+ # "BaseMaskedArray" defined the type as "<typing special form>")
121
+ _truthy_value = 1.0 # type: ignore[assignment]
122
+ _falsey_value = 0.0 # type: ignore[assignment]
123
+
124
+
125
+ _dtype_docstring = """
126
+ An ExtensionDtype for {dtype} data.
127
+
128
+ This dtype uses ``pd.NA`` as missing value indicator.
129
+
130
+ Attributes
131
+ ----------
132
+ None
133
+
134
+ Methods
135
+ -------
136
+ None
137
+
138
+ Examples
139
+ --------
140
+ For Float32Dtype:
141
+
142
+ >>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype())
143
+ >>> ser.dtype
144
+ Float32Dtype()
145
+
146
+ For Float64Dtype:
147
+
148
+ >>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype())
149
+ >>> ser.dtype
150
+ Float64Dtype()
151
+ """
152
+
153
+ # create the Dtype
154
+
155
+
156
+ @register_extension_dtype
157
+ class Float32Dtype(FloatingDtype):
158
+ type = np.float32
159
+ name: ClassVar[str] = "Float32"
160
+ __doc__ = _dtype_docstring.format(dtype="float32")
161
+
162
+
163
+ @register_extension_dtype
164
+ class Float64Dtype(FloatingDtype):
165
+ type = np.float64
166
+ name: ClassVar[str] = "Float64"
167
+ __doc__ = _dtype_docstring.format(dtype="float64")
168
+
169
+
170
+ NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = {
171
+ np.dtype(np.float32): Float32Dtype(),
172
+ np.dtype(np.float64): Float64Dtype(),
173
+ }
videollama2/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Literal,
6
+ )
7
+
8
+ import numpy as np
9
+
10
+ from pandas._libs import lib
11
+ from pandas._libs.tslibs import is_supported_dtype
12
+ from pandas.compat.numpy import function as nv
13
+
14
+ from pandas.core.dtypes.astype import astype_array
15
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
16
+ from pandas.core.dtypes.common import pandas_dtype
17
+ from pandas.core.dtypes.dtypes import NumpyEADtype
18
+ from pandas.core.dtypes.missing import isna
19
+
20
+ from pandas.core import (
21
+ arraylike,
22
+ missing,
23
+ nanops,
24
+ ops,
25
+ )
26
+ from pandas.core.arraylike import OpsMixin
27
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
28
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
29
+ from pandas.core.strings.object_array import ObjectStringArrayMixin
30
+
31
+ if TYPE_CHECKING:
32
+ from pandas._typing import (
33
+ AxisInt,
34
+ Dtype,
35
+ FillnaOptions,
36
+ InterpolateOptions,
37
+ NpDtype,
38
+ Scalar,
39
+ Self,
40
+ npt,
41
+ )
42
+
43
+ from pandas import Index
44
+
45
+
46
+ # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
47
+ # incompatible with definition in base class "ExtensionArray"
48
+ class NumpyExtensionArray( # type: ignore[misc]
49
+ OpsMixin,
50
+ NDArrayBackedExtensionArray,
51
+ ObjectStringArrayMixin,
52
+ ):
53
+ """
54
+ A pandas ExtensionArray for NumPy data.
55
+
56
+ This is mostly for internal compatibility, and is not especially
57
+ useful on its own.
58
+
59
+ Parameters
60
+ ----------
61
+ values : ndarray
62
+ The NumPy ndarray to wrap. Must be 1-dimensional.
63
+ copy : bool, default False
64
+ Whether to copy `values`.
65
+
66
+ Attributes
67
+ ----------
68
+ None
69
+
70
+ Methods
71
+ -------
72
+ None
73
+
74
+ Examples
75
+ --------
76
+ >>> pd.arrays.NumpyExtensionArray(np.array([0, 1, 2, 3]))
77
+ <NumpyExtensionArray>
78
+ [0, 1, 2, 3]
79
+ Length: 4, dtype: int64
80
+ """
81
+
82
+ # If you're wondering why pd.Series(cls) doesn't put the array in an
83
+ # ExtensionBlock, search for `ABCNumpyExtensionArray`. We check for
84
+ # that _typ to ensure that users don't unnecessarily use EAs inside
85
+ # pandas internals, which turns off things like block consolidation.
86
+ _typ = "npy_extension"
87
+ __array_priority__ = 1000
88
+ _ndarray: np.ndarray
89
+ _dtype: NumpyEADtype
90
+ _internal_fill_value = np.nan
91
+
92
+ # ------------------------------------------------------------------------
93
+ # Constructors
94
+
95
+ def __init__(
96
+ self, values: np.ndarray | NumpyExtensionArray, copy: bool = False
97
+ ) -> None:
98
+ if isinstance(values, type(self)):
99
+ values = values._ndarray
100
+ if not isinstance(values, np.ndarray):
101
+ raise ValueError(
102
+ f"'values' must be a NumPy array, not {type(values).__name__}"
103
+ )
104
+
105
+ if values.ndim == 0:
106
+ # Technically we support 2, but do not advertise that fact.
107
+ raise ValueError("NumpyExtensionArray must be 1-dimensional.")
108
+
109
+ if copy:
110
+ values = values.copy()
111
+
112
+ dtype = NumpyEADtype(values.dtype)
113
+ super().__init__(values, dtype)
114
+
115
+ @classmethod
116
+ def _from_sequence(
117
+ cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
118
+ ) -> NumpyExtensionArray:
119
+ if isinstance(dtype, NumpyEADtype):
120
+ dtype = dtype._dtype
121
+
122
+ # error: Argument "dtype" to "asarray" has incompatible type
123
+ # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object],
124
+ # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
125
+ # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
126
+ # _DTypeDict, Tuple[Any, Any]]]"
127
+ result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type]
128
+ if (
129
+ result.ndim > 1
130
+ and not hasattr(scalars, "dtype")
131
+ and (dtype is None or dtype == object)
132
+ ):
133
+ # e.g. list-of-tuples
134
+ result = construct_1d_object_array_from_listlike(scalars)
135
+
136
+ if copy and result is scalars:
137
+ result = result.copy()
138
+ return cls(result)
139
+
140
+ def _from_backing_data(self, arr: np.ndarray) -> NumpyExtensionArray:
141
+ return type(self)(arr)
142
+
143
+ # ------------------------------------------------------------------------
144
+ # Data
145
+
146
+ @property
147
+ def dtype(self) -> NumpyEADtype:
148
+ return self._dtype
149
+
150
+ # ------------------------------------------------------------------------
151
+ # NumPy Array Interface
152
+
153
+ def __array__(
154
+ self, dtype: NpDtype | None = None, copy: bool | None = None
155
+ ) -> np.ndarray:
156
+ return np.asarray(self._ndarray, dtype=dtype)
157
+
158
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
159
+ # Lightly modified version of
160
+ # https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
161
+ # The primary modification is not boxing scalar return values
162
+ # in NumpyExtensionArray, since pandas' ExtensionArrays are 1-d.
163
+ out = kwargs.get("out", ())
164
+
165
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
166
+ self, ufunc, method, *inputs, **kwargs
167
+ )
168
+ if result is not NotImplemented:
169
+ return result
170
+
171
+ if "out" in kwargs:
172
+ # e.g. test_ufunc_unary
173
+ return arraylike.dispatch_ufunc_with_out(
174
+ self, ufunc, method, *inputs, **kwargs
175
+ )
176
+
177
+ if method == "reduce":
178
+ result = arraylike.dispatch_reduction_ufunc(
179
+ self, ufunc, method, *inputs, **kwargs
180
+ )
181
+ if result is not NotImplemented:
182
+ # e.g. tests.series.test_ufunc.TestNumpyReductions
183
+ return result
184
+
185
+ # Defer to the implementation of the ufunc on unwrapped values.
186
+ inputs = tuple(
187
+ x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs
188
+ )
189
+ if out:
190
+ kwargs["out"] = tuple(
191
+ x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out
192
+ )
193
+ result = getattr(ufunc, method)(*inputs, **kwargs)
194
+
195
+ if ufunc.nout > 1:
196
+ # multiple return values; re-box array-like results
197
+ return tuple(type(self)(x) for x in result)
198
+ elif method == "at":
199
+ # no return value
200
+ return None
201
+ elif method == "reduce":
202
+ if isinstance(result, np.ndarray):
203
+ # e.g. test_np_reduce_2d
204
+ return type(self)(result)
205
+
206
+ # e.g. test_np_max_nested_tuples
207
+ return result
208
+ else:
209
+ # one return value; re-box array-like results
210
+ return type(self)(result)
211
+
212
+ # ------------------------------------------------------------------------
213
+ # Pandas ExtensionArray Interface
214
+
215
+ def astype(self, dtype, copy: bool = True):
216
+ dtype = pandas_dtype(dtype)
217
+
218
+ if dtype == self.dtype:
219
+ if copy:
220
+ return self.copy()
221
+ return self
222
+
223
+ result = astype_array(self._ndarray, dtype=dtype, copy=copy)
224
+ return result
225
+
226
+ def isna(self) -> np.ndarray:
227
+ return isna(self._ndarray)
228
+
229
+ def _validate_scalar(self, fill_value):
230
+ if fill_value is None:
231
+ # Primarily for subclasses
232
+ fill_value = self.dtype.na_value
233
+ return fill_value
234
+
235
+ def _values_for_factorize(self) -> tuple[np.ndarray, float | None]:
236
+ if self.dtype.kind in "iub":
237
+ fv = None
238
+ else:
239
+ fv = np.nan
240
+ return self._ndarray, fv
241
+
242
+ # Base EA class (and all other EA classes) don't have limit_area keyword
243
+ # This can be removed here as well when the interpolate ffill/bfill method
244
+ # deprecation is enforced
245
+ def _pad_or_backfill(
246
+ self,
247
+ *,
248
+ method: FillnaOptions,
249
+ limit: int | None = None,
250
+ limit_area: Literal["inside", "outside"] | None = None,
251
+ copy: bool = True,
252
+ ) -> Self:
253
+ """
254
+ ffill or bfill along axis=0.
255
+ """
256
+ if copy:
257
+ out_data = self._ndarray.copy()
258
+ else:
259
+ out_data = self._ndarray
260
+
261
+ meth = missing.clean_fill_method(method)
262
+ missing.pad_or_backfill_inplace(
263
+ out_data.T,
264
+ method=meth,
265
+ axis=0,
266
+ limit=limit,
267
+ limit_area=limit_area,
268
+ )
269
+
270
+ if not copy:
271
+ return self
272
+ return type(self)._simple_new(out_data, dtype=self.dtype)
273
+
274
+ def interpolate(
275
+ self,
276
+ *,
277
+ method: InterpolateOptions,
278
+ axis: int,
279
+ index: Index,
280
+ limit,
281
+ limit_direction,
282
+ limit_area,
283
+ copy: bool,
284
+ **kwargs,
285
+ ) -> Self:
286
+ """
287
+ See NDFrame.interpolate.__doc__.
288
+ """
289
+ # NB: we return type(self) even if copy=False
290
+ if not copy:
291
+ out_data = self._ndarray
292
+ else:
293
+ out_data = self._ndarray.copy()
294
+
295
+ # TODO: assert we have floating dtype?
296
+ missing.interpolate_2d_inplace(
297
+ out_data,
298
+ method=method,
299
+ axis=axis,
300
+ index=index,
301
+ limit=limit,
302
+ limit_direction=limit_direction,
303
+ limit_area=limit_area,
304
+ **kwargs,
305
+ )
306
+ if not copy:
307
+ return self
308
+ return type(self)._simple_new(out_data, dtype=self.dtype)
309
+
310
+ # ------------------------------------------------------------------------
311
+ # Reductions
312
+
313
+ def any(
314
+ self,
315
+ *,
316
+ axis: AxisInt | None = None,
317
+ out=None,
318
+ keepdims: bool = False,
319
+ skipna: bool = True,
320
+ ):
321
+ nv.validate_any((), {"out": out, "keepdims": keepdims})
322
+ result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
323
+ return self._wrap_reduction_result(axis, result)
324
+
325
+ def all(
326
+ self,
327
+ *,
328
+ axis: AxisInt | None = None,
329
+ out=None,
330
+ keepdims: bool = False,
331
+ skipna: bool = True,
332
+ ):
333
+ nv.validate_all((), {"out": out, "keepdims": keepdims})
334
+ result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
335
+ return self._wrap_reduction_result(axis, result)
336
+
337
+ def min(
338
+ self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
339
+ ) -> Scalar:
340
+ nv.validate_min((), kwargs)
341
+ result = nanops.nanmin(
342
+ values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
343
+ )
344
+ return self._wrap_reduction_result(axis, result)
345
+
346
+ def max(
347
+ self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
348
+ ) -> Scalar:
349
+ nv.validate_max((), kwargs)
350
+ result = nanops.nanmax(
351
+ values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
352
+ )
353
+ return self._wrap_reduction_result(axis, result)
354
+
355
+ def sum(
356
+ self,
357
+ *,
358
+ axis: AxisInt | None = None,
359
+ skipna: bool = True,
360
+ min_count: int = 0,
361
+ **kwargs,
362
+ ) -> Scalar:
363
+ nv.validate_sum((), kwargs)
364
+ result = nanops.nansum(
365
+ self._ndarray, axis=axis, skipna=skipna, min_count=min_count
366
+ )
367
+ return self._wrap_reduction_result(axis, result)
368
+
369
+ def prod(
370
+ self,
371
+ *,
372
+ axis: AxisInt | None = None,
373
+ skipna: bool = True,
374
+ min_count: int = 0,
375
+ **kwargs,
376
+ ) -> Scalar:
377
+ nv.validate_prod((), kwargs)
378
+ result = nanops.nanprod(
379
+ self._ndarray, axis=axis, skipna=skipna, min_count=min_count
380
+ )
381
+ return self._wrap_reduction_result(axis, result)
382
+
383
+ def mean(
384
+ self,
385
+ *,
386
+ axis: AxisInt | None = None,
387
+ dtype: NpDtype | None = None,
388
+ out=None,
389
+ keepdims: bool = False,
390
+ skipna: bool = True,
391
+ ):
392
+ nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
393
+ result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
394
+ return self._wrap_reduction_result(axis, result)
395
+
396
+ def median(
397
+ self,
398
+ *,
399
+ axis: AxisInt | None = None,
400
+ out=None,
401
+ overwrite_input: bool = False,
402
+ keepdims: bool = False,
403
+ skipna: bool = True,
404
+ ):
405
+ nv.validate_median(
406
+ (), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims}
407
+ )
408
+ result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
409
+ return self._wrap_reduction_result(axis, result)
410
+
411
+ def std(
412
+ self,
413
+ *,
414
+ axis: AxisInt | None = None,
415
+ dtype: NpDtype | None = None,
416
+ out=None,
417
+ ddof: int = 1,
418
+ keepdims: bool = False,
419
+ skipna: bool = True,
420
+ ):
421
+ nv.validate_stat_ddof_func(
422
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
423
+ )
424
+ result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
425
+ return self._wrap_reduction_result(axis, result)
426
+
427
+ def var(
428
+ self,
429
+ *,
430
+ axis: AxisInt | None = None,
431
+ dtype: NpDtype | None = None,
432
+ out=None,
433
+ ddof: int = 1,
434
+ keepdims: bool = False,
435
+ skipna: bool = True,
436
+ ):
437
+ nv.validate_stat_ddof_func(
438
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
439
+ )
440
+ result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
441
+ return self._wrap_reduction_result(axis, result)
442
+
443
+ def sem(
444
+ self,
445
+ *,
446
+ axis: AxisInt | None = None,
447
+ dtype: NpDtype | None = None,
448
+ out=None,
449
+ ddof: int = 1,
450
+ keepdims: bool = False,
451
+ skipna: bool = True,
452
+ ):
453
+ nv.validate_stat_ddof_func(
454
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
455
+ )
456
+ result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
457
+ return self._wrap_reduction_result(axis, result)
458
+
459
+ def kurt(
460
+ self,
461
+ *,
462
+ axis: AxisInt | None = None,
463
+ dtype: NpDtype | None = None,
464
+ out=None,
465
+ keepdims: bool = False,
466
+ skipna: bool = True,
467
+ ):
468
+ nv.validate_stat_ddof_func(
469
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
470
+ )
471
+ result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
472
+ return self._wrap_reduction_result(axis, result)
473
+
474
+ def skew(
475
+ self,
476
+ *,
477
+ axis: AxisInt | None = None,
478
+ dtype: NpDtype | None = None,
479
+ out=None,
480
+ keepdims: bool = False,
481
+ skipna: bool = True,
482
+ ):
483
+ nv.validate_stat_ddof_func(
484
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
485
+ )
486
+ result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
487
+ return self._wrap_reduction_result(axis, result)
488
+
489
+ # ------------------------------------------------------------------------
490
+ # Additional Methods
491
+
492
+ def to_numpy(
493
+ self,
494
+ dtype: npt.DTypeLike | None = None,
495
+ copy: bool = False,
496
+ na_value: object = lib.no_default,
497
+ ) -> np.ndarray:
498
+ mask = self.isna()
499
+ if na_value is not lib.no_default and mask.any():
500
+ result = self._ndarray.copy()
501
+ result[mask] = na_value
502
+ else:
503
+ result = self._ndarray
504
+
505
+ result = np.asarray(result, dtype=dtype)
506
+
507
+ if copy and result is self._ndarray:
508
+ result = result.copy()
509
+
510
+ return result
511
+
512
+ # ------------------------------------------------------------------------
513
+ # Ops
514
+
515
+ def __invert__(self) -> NumpyExtensionArray:
516
+ return type(self)(~self._ndarray)
517
+
518
+ def __neg__(self) -> NumpyExtensionArray:
519
+ return type(self)(-self._ndarray)
520
+
521
+ def __pos__(self) -> NumpyExtensionArray:
522
+ return type(self)(+self._ndarray)
523
+
524
+ def __abs__(self) -> NumpyExtensionArray:
525
+ return type(self)(abs(self._ndarray))
526
+
527
+ def _cmp_method(self, other, op):
528
+ if isinstance(other, NumpyExtensionArray):
529
+ other = other._ndarray
530
+
531
+ other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
532
+ pd_op = ops.get_array_op(op)
533
+ other = ensure_wrapped_if_datetimelike(other)
534
+ result = pd_op(self._ndarray, other)
535
+
536
+ if op is divmod or op is ops.rdivmod:
537
+ a, b = result
538
+ if isinstance(a, np.ndarray):
539
+ # for e.g. op vs TimedeltaArray, we may already
540
+ # have an ExtensionArray, in which case we do not wrap
541
+ return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
542
+ return a, b
543
+
544
+ if isinstance(result, np.ndarray):
545
+ # for e.g. multiplication vs TimedeltaArray, we may already
546
+ # have an ExtensionArray, in which case we do not wrap
547
+ return self._wrap_ndarray_result(result)
548
+ return result
549
+
550
+ _arith_method = _cmp_method
551
+
552
+ def _wrap_ndarray_result(self, result: np.ndarray):
553
+ # If we have timedelta64[ns] result, return a TimedeltaArray instead
554
+ # of a NumpyExtensionArray
555
+ if result.dtype.kind == "m" and is_supported_dtype(result.dtype):
556
+ from pandas.core.arrays import TimedeltaArray
557
+
558
+ return TimedeltaArray._simple_new(result, dtype=result.dtype)
559
+ return type(self)(result)
560
+
561
+ # ------------------------------------------------------------------------
562
+ # String methods interface
563
+ _str_na_value = np.nan
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas.core.arrays.sparse.accessor import (
2
+ SparseAccessor,
3
+ SparseFrameAccessor,
4
+ )
5
+ from pandas.core.arrays.sparse.array import (
6
+ BlockIndex,
7
+ IntIndex,
8
+ SparseArray,
9
+ make_sparse_index,
10
+ )
11
+
12
+ __all__ = [
13
+ "BlockIndex",
14
+ "IntIndex",
15
+ "make_sparse_index",
16
+ "SparseAccessor",
17
+ "SparseArray",
18
+ "SparseFrameAccessor",
19
+ ]
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (471 Bytes). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc ADDED
Binary file (44.4 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc ADDED
Binary file (6.42 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Sparse accessor"""
2
+ from __future__ import annotations
3
+
4
+ from typing import TYPE_CHECKING
5
+
6
+ import numpy as np
7
+
8
+ from pandas.compat._optional import import_optional_dependency
9
+
10
+ from pandas.core.dtypes.cast import find_common_type
11
+ from pandas.core.dtypes.dtypes import SparseDtype
12
+
13
+ from pandas.core.accessor import (
14
+ PandasDelegate,
15
+ delegate_names,
16
+ )
17
+ from pandas.core.arrays.sparse.array import SparseArray
18
+
19
+ if TYPE_CHECKING:
20
+ from pandas import (
21
+ DataFrame,
22
+ Series,
23
+ )
24
+
25
+
26
+ class BaseAccessor:
27
+ _validation_msg = "Can only use the '.sparse' accessor with Sparse data."
28
+
29
+ def __init__(self, data=None) -> None:
30
+ self._parent = data
31
+ self._validate(data)
32
+
33
+ def _validate(self, data):
34
+ raise NotImplementedError
35
+
36
+
37
+ @delegate_names(
38
+ SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
39
+ )
40
+ class SparseAccessor(BaseAccessor, PandasDelegate):
41
+ """
42
+ Accessor for SparseSparse from other sparse matrix data types.
43
+
44
+ Examples
45
+ --------
46
+ >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
47
+ >>> ser.sparse.density
48
+ 0.6
49
+ >>> ser.sparse.sp_values
50
+ array([2, 2, 2])
51
+ """
52
+
53
+ def _validate(self, data):
54
+ if not isinstance(data.dtype, SparseDtype):
55
+ raise AttributeError(self._validation_msg)
56
+
57
+ def _delegate_property_get(self, name: str, *args, **kwargs):
58
+ return getattr(self._parent.array, name)
59
+
60
+ def _delegate_method(self, name: str, *args, **kwargs):
61
+ if name == "from_coo":
62
+ return self.from_coo(*args, **kwargs)
63
+ elif name == "to_coo":
64
+ return self.to_coo(*args, **kwargs)
65
+ else:
66
+ raise ValueError
67
+
68
+ @classmethod
69
+ def from_coo(cls, A, dense_index: bool = False) -> Series:
70
+ """
71
+ Create a Series with sparse values from a scipy.sparse.coo_matrix.
72
+
73
+ Parameters
74
+ ----------
75
+ A : scipy.sparse.coo_matrix
76
+ dense_index : bool, default False
77
+ If False (default), the index consists of only the
78
+ coords of the non-null entries of the original coo_matrix.
79
+ If True, the index consists of the full sorted
80
+ (row, col) coordinates of the coo_matrix.
81
+
82
+ Returns
83
+ -------
84
+ s : Series
85
+ A Series with sparse values.
86
+
87
+ Examples
88
+ --------
89
+ >>> from scipy import sparse
90
+
91
+ >>> A = sparse.coo_matrix(
92
+ ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
93
+ ... )
94
+ >>> A
95
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
96
+ with 3 stored elements in COOrdinate format>
97
+
98
+ >>> A.todense()
99
+ matrix([[0., 0., 1., 2.],
100
+ [3., 0., 0., 0.],
101
+ [0., 0., 0., 0.]])
102
+
103
+ >>> ss = pd.Series.sparse.from_coo(A)
104
+ >>> ss
105
+ 0 2 1.0
106
+ 3 2.0
107
+ 1 0 3.0
108
+ dtype: Sparse[float64, nan]
109
+ """
110
+ from pandas import Series
111
+ from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
112
+
113
+ result = coo_to_sparse_series(A, dense_index=dense_index)
114
+ result = Series(result.array, index=result.index, copy=False)
115
+
116
+ return result
117
+
118
+ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
119
+ """
120
+ Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
121
+
122
+ Use row_levels and column_levels to determine the row and column
123
+ coordinates respectively. row_levels and column_levels are the names
124
+ (labels) or numbers of the levels. {row_levels, column_levels} must be
125
+ a partition of the MultiIndex level names (or numbers).
126
+
127
+ Parameters
128
+ ----------
129
+ row_levels : tuple/list
130
+ column_levels : tuple/list
131
+ sort_labels : bool, default False
132
+ Sort the row and column labels before forming the sparse matrix.
133
+ When `row_levels` and/or `column_levels` refer to a single level,
134
+ set to `True` for a faster execution.
135
+
136
+ Returns
137
+ -------
138
+ y : scipy.sparse.coo_matrix
139
+ rows : list (row labels)
140
+ columns : list (column labels)
141
+
142
+ Examples
143
+ --------
144
+ >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
145
+ >>> s.index = pd.MultiIndex.from_tuples(
146
+ ... [
147
+ ... (1, 2, "a", 0),
148
+ ... (1, 2, "a", 1),
149
+ ... (1, 1, "b", 0),
150
+ ... (1, 1, "b", 1),
151
+ ... (2, 1, "b", 0),
152
+ ... (2, 1, "b", 1)
153
+ ... ],
154
+ ... names=["A", "B", "C", "D"],
155
+ ... )
156
+ >>> s
157
+ A B C D
158
+ 1 2 a 0 3.0
159
+ 1 NaN
160
+ 1 b 0 1.0
161
+ 1 3.0
162
+ 2 1 b 0 NaN
163
+ 1 NaN
164
+ dtype: float64
165
+
166
+ >>> ss = s.astype("Sparse")
167
+ >>> ss
168
+ A B C D
169
+ 1 2 a 0 3.0
170
+ 1 NaN
171
+ 1 b 0 1.0
172
+ 1 3.0
173
+ 2 1 b 0 NaN
174
+ 1 NaN
175
+ dtype: Sparse[float64, nan]
176
+
177
+ >>> A, rows, columns = ss.sparse.to_coo(
178
+ ... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
179
+ ... )
180
+ >>> A
181
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
182
+ with 3 stored elements in COOrdinate format>
183
+ >>> A.todense()
184
+ matrix([[0., 0., 1., 3.],
185
+ [3., 0., 0., 0.],
186
+ [0., 0., 0., 0.]])
187
+
188
+ >>> rows
189
+ [(1, 1), (1, 2), (2, 1)]
190
+ >>> columns
191
+ [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
192
+ """
193
+ from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
194
+
195
+ A, rows, columns = sparse_series_to_coo(
196
+ self._parent, row_levels, column_levels, sort_labels=sort_labels
197
+ )
198
+ return A, rows, columns
199
+
200
+ def to_dense(self) -> Series:
201
+ """
202
+ Convert a Series from sparse values to dense.
203
+
204
+ Returns
205
+ -------
206
+ Series:
207
+ A Series with the same values, stored as a dense array.
208
+
209
+ Examples
210
+ --------
211
+ >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
212
+ >>> series
213
+ 0 0
214
+ 1 1
215
+ 2 0
216
+ dtype: Sparse[int64, 0]
217
+
218
+ >>> series.sparse.to_dense()
219
+ 0 0
220
+ 1 1
221
+ 2 0
222
+ dtype: int64
223
+ """
224
+ from pandas import Series
225
+
226
+ return Series(
227
+ self._parent.array.to_dense(),
228
+ index=self._parent.index,
229
+ name=self._parent.name,
230
+ copy=False,
231
+ )
232
+
233
+
234
+ class SparseFrameAccessor(BaseAccessor, PandasDelegate):
235
+ """
236
+ DataFrame accessor for sparse data.
237
+
238
+ Examples
239
+ --------
240
+ >>> df = pd.DataFrame({"a": [1, 2, 0, 0],
241
+ ... "b": [3, 0, 0, 4]}, dtype="Sparse[int]")
242
+ >>> df.sparse.density
243
+ 0.5
244
+ """
245
+
246
+ def _validate(self, data):
247
+ dtypes = data.dtypes
248
+ if not all(isinstance(t, SparseDtype) for t in dtypes):
249
+ raise AttributeError(self._validation_msg)
250
+
251
+ @classmethod
252
+ def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:
253
+ """
254
+ Create a new DataFrame from a scipy sparse matrix.
255
+
256
+ Parameters
257
+ ----------
258
+ data : scipy.sparse.spmatrix
259
+ Must be convertible to csc format.
260
+ index, columns : Index, optional
261
+ Row and column labels to use for the resulting DataFrame.
262
+ Defaults to a RangeIndex.
263
+
264
+ Returns
265
+ -------
266
+ DataFrame
267
+ Each column of the DataFrame is stored as a
268
+ :class:`arrays.SparseArray`.
269
+
270
+ Examples
271
+ --------
272
+ >>> import scipy.sparse
273
+ >>> mat = scipy.sparse.eye(3, dtype=float)
274
+ >>> pd.DataFrame.sparse.from_spmatrix(mat)
275
+ 0 1 2
276
+ 0 1.0 0 0
277
+ 1 0 1.0 0
278
+ 2 0 0 1.0
279
+ """
280
+ from pandas._libs.sparse import IntIndex
281
+
282
+ from pandas import DataFrame
283
+
284
+ data = data.tocsc()
285
+ index, columns = cls._prep_index(data, index, columns)
286
+ n_rows, n_columns = data.shape
287
+ # We need to make sure indices are sorted, as we create
288
+ # IntIndex with no input validation (i.e. check_integrity=False ).
289
+ # Indices may already be sorted in scipy in which case this adds
290
+ # a small overhead.
291
+ data.sort_indices()
292
+ indices = data.indices
293
+ indptr = data.indptr
294
+ array_data = data.data
295
+ dtype = SparseDtype(array_data.dtype, 0)
296
+ arrays = []
297
+ for i in range(n_columns):
298
+ sl = slice(indptr[i], indptr[i + 1])
299
+ idx = IntIndex(n_rows, indices[sl], check_integrity=False)
300
+ arr = SparseArray._simple_new(array_data[sl], idx, dtype)
301
+ arrays.append(arr)
302
+ return DataFrame._from_arrays(
303
+ arrays, columns=columns, index=index, verify_integrity=False
304
+ )
305
+
306
+ def to_dense(self) -> DataFrame:
307
+ """
308
+ Convert a DataFrame with sparse values to dense.
309
+
310
+ Returns
311
+ -------
312
+ DataFrame
313
+ A DataFrame with the same values stored as dense arrays.
314
+
315
+ Examples
316
+ --------
317
+ >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
318
+ >>> df.sparse.to_dense()
319
+ A
320
+ 0 0
321
+ 1 1
322
+ 2 0
323
+ """
324
+ from pandas import DataFrame
325
+
326
+ data = {k: v.array.to_dense() for k, v in self._parent.items()}
327
+ return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
328
+
329
+ def to_coo(self):
330
+ """
331
+ Return the contents of the frame as a sparse SciPy COO matrix.
332
+
333
+ Returns
334
+ -------
335
+ scipy.sparse.spmatrix
336
+ If the caller is heterogeneous and contains booleans or objects,
337
+ the result will be of dtype=object. See Notes.
338
+
339
+ Notes
340
+ -----
341
+ The dtype will be the lowest-common-denominator type (implicit
342
+ upcasting); that is to say if the dtypes (even of numeric types)
343
+ are mixed, the one that accommodates all will be chosen.
344
+
345
+ e.g. If the dtypes are float16 and float32, dtype will be upcast to
346
+ float32. By numpy.find_common_type convention, mixing int64 and
347
+ and uint64 will result in a float64 dtype.
348
+
349
+ Examples
350
+ --------
351
+ >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
352
+ >>> df.sparse.to_coo()
353
+ <4x1 sparse matrix of type '<class 'numpy.int64'>'
354
+ with 2 stored elements in COOrdinate format>
355
+ """
356
+ import_optional_dependency("scipy")
357
+ from scipy.sparse import coo_matrix
358
+
359
+ dtype = find_common_type(self._parent.dtypes.to_list())
360
+ if isinstance(dtype, SparseDtype):
361
+ dtype = dtype.subtype
362
+
363
+ cols, rows, data = [], [], []
364
+ for col, (_, ser) in enumerate(self._parent.items()):
365
+ sp_arr = ser.array
366
+ if sp_arr.fill_value != 0:
367
+ raise ValueError("fill value must be 0 when converting to COO matrix")
368
+
369
+ row = sp_arr.sp_index.indices
370
+ cols.append(np.repeat(col, len(row)))
371
+ rows.append(row)
372
+ data.append(sp_arr.sp_values.astype(dtype, copy=False))
373
+
374
+ cols = np.concatenate(cols)
375
+ rows = np.concatenate(rows)
376
+ data = np.concatenate(data)
377
+ return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
378
+
379
+ @property
380
+ def density(self) -> float:
381
+ """
382
+ Ratio of non-sparse points to total (dense) data points.
383
+
384
+ Examples
385
+ --------
386
+ >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
387
+ >>> df.sparse.density
388
+ 0.5
389
+ """
390
+ tmp = np.mean([column.array.density for _, column in self._parent.items()])
391
+ return tmp
392
+
393
+ @staticmethod
394
+ def _prep_index(data, index, columns):
395
+ from pandas.core.indexes.api import (
396
+ default_index,
397
+ ensure_index,
398
+ )
399
+
400
+ N, K = data.shape
401
+ if index is None:
402
+ index = default_index(N)
403
+ else:
404
+ index = ensure_index(index)
405
+ if columns is None:
406
+ columns = default_index(K)
407
+ else:
408
+ columns = ensure_index(columns)
409
+
410
+ if len(columns) != K:
411
+ raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
412
+ if len(index) != N:
413
+ raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
414
+ return index, columns
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py ADDED
@@ -0,0 +1,1929 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ SparseArray data structure
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from collections import abc
7
+ import numbers
8
+ import operator
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Callable,
13
+ Literal,
14
+ cast,
15
+ overload,
16
+ )
17
+ import warnings
18
+
19
+ import numpy as np
20
+
21
+ from pandas._libs import lib
22
+ import pandas._libs.sparse as splib
23
+ from pandas._libs.sparse import (
24
+ BlockIndex,
25
+ IntIndex,
26
+ SparseIndex,
27
+ )
28
+ from pandas._libs.tslibs import NaT
29
+ from pandas.compat.numpy import function as nv
30
+ from pandas.errors import PerformanceWarning
31
+ from pandas.util._decorators import doc
32
+ from pandas.util._exceptions import find_stack_level
33
+ from pandas.util._validators import (
34
+ validate_bool_kwarg,
35
+ validate_insert_loc,
36
+ )
37
+
38
+ from pandas.core.dtypes.astype import astype_array
39
+ from pandas.core.dtypes.cast import (
40
+ construct_1d_arraylike_from_scalar,
41
+ find_common_type,
42
+ maybe_box_datetimelike,
43
+ )
44
+ from pandas.core.dtypes.common import (
45
+ is_bool_dtype,
46
+ is_integer,
47
+ is_list_like,
48
+ is_object_dtype,
49
+ is_scalar,
50
+ is_string_dtype,
51
+ pandas_dtype,
52
+ )
53
+ from pandas.core.dtypes.dtypes import (
54
+ DatetimeTZDtype,
55
+ SparseDtype,
56
+ )
57
+ from pandas.core.dtypes.generic import (
58
+ ABCIndex,
59
+ ABCSeries,
60
+ )
61
+ from pandas.core.dtypes.missing import (
62
+ isna,
63
+ na_value_for_dtype,
64
+ notna,
65
+ )
66
+
67
+ from pandas.core import arraylike
68
+ import pandas.core.algorithms as algos
69
+ from pandas.core.arraylike import OpsMixin
70
+ from pandas.core.arrays import ExtensionArray
71
+ from pandas.core.base import PandasObject
72
+ import pandas.core.common as com
73
+ from pandas.core.construction import (
74
+ ensure_wrapped_if_datetimelike,
75
+ extract_array,
76
+ sanitize_array,
77
+ )
78
+ from pandas.core.indexers import (
79
+ check_array_indexer,
80
+ unpack_tuple_and_ellipses,
81
+ )
82
+ from pandas.core.nanops import check_below_min_count
83
+
84
+ from pandas.io.formats import printing
85
+
86
+ # See https://github.com/python/typing/issues/684
87
+ if TYPE_CHECKING:
88
+ from collections.abc import Sequence
89
+ from enum import Enum
90
+
91
+ class ellipsis(Enum):
92
+ Ellipsis = "..."
93
+
94
+ Ellipsis = ellipsis.Ellipsis
95
+
96
+ from scipy.sparse import spmatrix
97
+
98
+ from pandas._typing import (
99
+ FillnaOptions,
100
+ NumpySorter,
101
+ )
102
+
103
+ SparseIndexKind = Literal["integer", "block"]
104
+
105
+ from pandas._typing import (
106
+ ArrayLike,
107
+ AstypeArg,
108
+ Axis,
109
+ AxisInt,
110
+ Dtype,
111
+ NpDtype,
112
+ PositionalIndexer,
113
+ Scalar,
114
+ ScalarIndexer,
115
+ Self,
116
+ SequenceIndexer,
117
+ npt,
118
+ )
119
+
120
+ from pandas import Series
121
+
122
+ else:
123
+ ellipsis = type(Ellipsis)
124
+
125
+
126
+ # ----------------------------------------------------------------------------
127
+ # Array
128
+
129
+ _sparray_doc_kwargs = {"klass": "SparseArray"}
130
+
131
+
132
+ def _get_fill(arr: SparseArray) -> np.ndarray:
133
+ """
134
+ Create a 0-dim ndarray containing the fill value
135
+
136
+ Parameters
137
+ ----------
138
+ arr : SparseArray
139
+
140
+ Returns
141
+ -------
142
+ fill_value : ndarray
143
+ 0-dim ndarray with just the fill value.
144
+
145
+ Notes
146
+ -----
147
+ coerce fill_value to arr dtype if possible
148
+ int64 SparseArray can have NaN as fill_value if there is no missing
149
+ """
150
+ try:
151
+ return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
152
+ except ValueError:
153
+ return np.asarray(arr.fill_value)
154
+
155
+
156
+ def _sparse_array_op(
157
+ left: SparseArray, right: SparseArray, op: Callable, name: str
158
+ ) -> SparseArray:
159
+ """
160
+ Perform a binary operation between two arrays.
161
+
162
+ Parameters
163
+ ----------
164
+ left : Union[SparseArray, ndarray]
165
+ right : Union[SparseArray, ndarray]
166
+ op : Callable
167
+ The binary operation to perform
168
+ name str
169
+ Name of the callable.
170
+
171
+ Returns
172
+ -------
173
+ SparseArray
174
+ """
175
+ if name.startswith("__"):
176
+ # For lookups in _libs.sparse we need non-dunder op name
177
+ name = name[2:-2]
178
+
179
+ # dtype used to find corresponding sparse method
180
+ ltype = left.dtype.subtype
181
+ rtype = right.dtype.subtype
182
+
183
+ if ltype != rtype:
184
+ subtype = find_common_type([ltype, rtype])
185
+ ltype = SparseDtype(subtype, left.fill_value)
186
+ rtype = SparseDtype(subtype, right.fill_value)
187
+
188
+ left = left.astype(ltype, copy=False)
189
+ right = right.astype(rtype, copy=False)
190
+ dtype = ltype.subtype
191
+ else:
192
+ dtype = ltype
193
+
194
+ # dtype the result must have
195
+ result_dtype = None
196
+
197
+ if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
198
+ with np.errstate(all="ignore"):
199
+ result = op(left.to_dense(), right.to_dense())
200
+ fill = op(_get_fill(left), _get_fill(right))
201
+
202
+ if left.sp_index.ngaps == 0:
203
+ index = left.sp_index
204
+ else:
205
+ index = right.sp_index
206
+ elif left.sp_index.equals(right.sp_index):
207
+ with np.errstate(all="ignore"):
208
+ result = op(left.sp_values, right.sp_values)
209
+ fill = op(_get_fill(left), _get_fill(right))
210
+ index = left.sp_index
211
+ else:
212
+ if name[0] == "r":
213
+ left, right = right, left
214
+ name = name[1:]
215
+
216
+ if name in ("and", "or", "xor") and dtype == "bool":
217
+ opname = f"sparse_{name}_uint8"
218
+ # to make template simple, cast here
219
+ left_sp_values = left.sp_values.view(np.uint8)
220
+ right_sp_values = right.sp_values.view(np.uint8)
221
+ result_dtype = bool
222
+ else:
223
+ opname = f"sparse_{name}_{dtype}"
224
+ left_sp_values = left.sp_values
225
+ right_sp_values = right.sp_values
226
+
227
+ if (
228
+ name in ["floordiv", "mod"]
229
+ and (right == 0).any()
230
+ and left.dtype.kind in "iu"
231
+ ):
232
+ # Match the non-Sparse Series behavior
233
+ opname = f"sparse_{name}_float64"
234
+ left_sp_values = left_sp_values.astype("float64")
235
+ right_sp_values = right_sp_values.astype("float64")
236
+
237
+ sparse_op = getattr(splib, opname)
238
+
239
+ with np.errstate(all="ignore"):
240
+ result, index, fill = sparse_op(
241
+ left_sp_values,
242
+ left.sp_index,
243
+ left.fill_value,
244
+ right_sp_values,
245
+ right.sp_index,
246
+ right.fill_value,
247
+ )
248
+
249
+ if name == "divmod":
250
+ # result is a 2-tuple
251
+ # error: Incompatible return value type (got "Tuple[SparseArray,
252
+ # SparseArray]", expected "SparseArray")
253
+ return ( # type: ignore[return-value]
254
+ _wrap_result(name, result[0], index, fill[0], dtype=result_dtype),
255
+ _wrap_result(name, result[1], index, fill[1], dtype=result_dtype),
256
+ )
257
+
258
+ if result_dtype is None:
259
+ result_dtype = result.dtype
260
+
261
+ return _wrap_result(name, result, index, fill, dtype=result_dtype)
262
+
263
+
264
+ def _wrap_result(
265
+ name: str, data, sparse_index, fill_value, dtype: Dtype | None = None
266
+ ) -> SparseArray:
267
+ """
268
+ wrap op result to have correct dtype
269
+ """
270
+ if name.startswith("__"):
271
+ # e.g. __eq__ --> eq
272
+ name = name[2:-2]
273
+
274
+ if name in ("eq", "ne", "lt", "gt", "le", "ge"):
275
+ dtype = bool
276
+
277
+ fill_value = lib.item_from_zerodim(fill_value)
278
+
279
+ if is_bool_dtype(dtype):
280
+ # fill_value may be np.bool_
281
+ fill_value = bool(fill_value)
282
+ return SparseArray(
283
+ data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype
284
+ )
285
+
286
+
287
+ class SparseArray(OpsMixin, PandasObject, ExtensionArray):
288
+ """
289
+ An ExtensionArray for storing sparse data.
290
+
291
+ Parameters
292
+ ----------
293
+ data : array-like or scalar
294
+ A dense array of values to store in the SparseArray. This may contain
295
+ `fill_value`.
296
+ sparse_index : SparseIndex, optional
297
+ fill_value : scalar, optional
298
+ Elements in data that are ``fill_value`` are not stored in the
299
+ SparseArray. For memory savings, this should be the most common value
300
+ in `data`. By default, `fill_value` depends on the dtype of `data`:
301
+
302
+ =========== ==========
303
+ data.dtype na_value
304
+ =========== ==========
305
+ float ``np.nan``
306
+ int ``0``
307
+ bool False
308
+ datetime64 ``pd.NaT``
309
+ timedelta64 ``pd.NaT``
310
+ =========== ==========
311
+
312
+ The fill value is potentially specified in three ways. In order of
313
+ precedence, these are
314
+
315
+ 1. The `fill_value` argument
316
+ 2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
317
+ a ``SparseDtype``
318
+ 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
319
+ is not a ``SparseDtype`` and `data` is a ``SparseArray``.
320
+
321
+ kind : str
322
+ Can be 'integer' or 'block', default is 'integer'.
323
+ The type of storage for sparse locations.
324
+
325
+ * 'block': Stores a `block` and `block_length` for each
326
+ contiguous *span* of sparse values. This is best when
327
+ sparse data tends to be clumped together, with large
328
+ regions of ``fill-value`` values between sparse values.
329
+ * 'integer': uses an integer to store the location of
330
+ each sparse value.
331
+
332
+ dtype : np.dtype or SparseDtype, optional
333
+ The dtype to use for the SparseArray. For numpy dtypes, this
334
+ determines the dtype of ``self.sp_values``. For SparseDtype,
335
+ this determines ``self.sp_values`` and ``self.fill_value``.
336
+ copy : bool, default False
337
+ Whether to explicitly copy the incoming `data` array.
338
+
339
+ Attributes
340
+ ----------
341
+ None
342
+
343
+ Methods
344
+ -------
345
+ None
346
+
347
+ Examples
348
+ --------
349
+ >>> from pandas.arrays import SparseArray
350
+ >>> arr = SparseArray([0, 0, 1, 2])
351
+ >>> arr
352
+ [0, 0, 1, 2]
353
+ Fill: 0
354
+ IntIndex
355
+ Indices: array([2, 3], dtype=int32)
356
+ """
357
+
358
+ _subtyp = "sparse_array" # register ABCSparseArray
359
+ _hidden_attrs = PandasObject._hidden_attrs | frozenset([])
360
+ _sparse_index: SparseIndex
361
+ _sparse_values: np.ndarray
362
+ _dtype: SparseDtype
363
+
364
+ def __init__(
365
+ self,
366
+ data,
367
+ sparse_index=None,
368
+ fill_value=None,
369
+ kind: SparseIndexKind = "integer",
370
+ dtype: Dtype | None = None,
371
+ copy: bool = False,
372
+ ) -> None:
373
+ if fill_value is None and isinstance(dtype, SparseDtype):
374
+ fill_value = dtype.fill_value
375
+
376
+ if isinstance(data, type(self)):
377
+ # disable normal inference on dtype, sparse_index, & fill_value
378
+ if sparse_index is None:
379
+ sparse_index = data.sp_index
380
+ if fill_value is None:
381
+ fill_value = data.fill_value
382
+ if dtype is None:
383
+ dtype = data.dtype
384
+ # TODO: make kind=None, and use data.kind?
385
+ data = data.sp_values
386
+
387
+ # Handle use-provided dtype
388
+ if isinstance(dtype, str):
389
+ # Two options: dtype='int', regular numpy dtype
390
+ # or dtype='Sparse[int]', a sparse dtype
391
+ try:
392
+ dtype = SparseDtype.construct_from_string(dtype)
393
+ except TypeError:
394
+ dtype = pandas_dtype(dtype)
395
+
396
+ if isinstance(dtype, SparseDtype):
397
+ if fill_value is None:
398
+ fill_value = dtype.fill_value
399
+ dtype = dtype.subtype
400
+
401
+ if is_scalar(data):
402
+ warnings.warn(
403
+ f"Constructing {type(self).__name__} with scalar data is deprecated "
404
+ "and will raise in a future version. Pass a sequence instead.",
405
+ FutureWarning,
406
+ stacklevel=find_stack_level(),
407
+ )
408
+ if sparse_index is None:
409
+ npoints = 1
410
+ else:
411
+ npoints = sparse_index.length
412
+
413
+ data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None)
414
+ dtype = data.dtype
415
+
416
+ if dtype is not None:
417
+ dtype = pandas_dtype(dtype)
418
+
419
+ # TODO: disentangle the fill_value dtype inference from
420
+ # dtype inference
421
+ if data is None:
422
+ # TODO: What should the empty dtype be? Object or float?
423
+
424
+ # error: Argument "dtype" to "array" has incompatible type
425
+ # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any],
426
+ # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
427
+ # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
428
+ data = np.array([], dtype=dtype) # type: ignore[arg-type]
429
+
430
+ try:
431
+ data = sanitize_array(data, index=None)
432
+ except ValueError:
433
+ # NumPy may raise a ValueError on data like [1, []]
434
+ # we retry with object dtype here.
435
+ if dtype is None:
436
+ dtype = np.dtype(object)
437
+ data = np.atleast_1d(np.asarray(data, dtype=dtype))
438
+ else:
439
+ raise
440
+
441
+ if copy:
442
+ # TODO: avoid double copy when dtype forces cast.
443
+ data = data.copy()
444
+
445
+ if fill_value is None:
446
+ fill_value_dtype = data.dtype if dtype is None else dtype
447
+ if fill_value_dtype is None:
448
+ fill_value = np.nan
449
+ else:
450
+ fill_value = na_value_for_dtype(fill_value_dtype)
451
+
452
+ if isinstance(data, type(self)) and sparse_index is None:
453
+ sparse_index = data._sparse_index
454
+ # error: Argument "dtype" to "asarray" has incompatible type
455
+ # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
456
+ sparse_values = np.asarray(
457
+ data.sp_values, dtype=dtype # type: ignore[arg-type]
458
+ )
459
+ elif sparse_index is None:
460
+ data = extract_array(data, extract_numpy=True)
461
+ if not isinstance(data, np.ndarray):
462
+ # EA
463
+ if isinstance(data.dtype, DatetimeTZDtype):
464
+ warnings.warn(
465
+ f"Creating SparseArray from {data.dtype} data "
466
+ "loses timezone information. Cast to object before "
467
+ "sparse to retain timezone information.",
468
+ UserWarning,
469
+ stacklevel=find_stack_level(),
470
+ )
471
+ data = np.asarray(data, dtype="datetime64[ns]")
472
+ if fill_value is NaT:
473
+ fill_value = np.datetime64("NaT", "ns")
474
+ data = np.asarray(data)
475
+ sparse_values, sparse_index, fill_value = _make_sparse(
476
+ # error: Argument "dtype" to "_make_sparse" has incompatible type
477
+ # "Union[ExtensionDtype, dtype[Any], None]"; expected
478
+ # "Optional[dtype[Any]]"
479
+ data,
480
+ kind=kind,
481
+ fill_value=fill_value,
482
+ dtype=dtype, # type: ignore[arg-type]
483
+ )
484
+ else:
485
+ # error: Argument "dtype" to "asarray" has incompatible type
486
+ # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
487
+ sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type]
488
+ if len(sparse_values) != sparse_index.npoints:
489
+ raise AssertionError(
490
+ f"Non array-like type {type(sparse_values)} must "
491
+ "have the same length as the index"
492
+ )
493
+ self._sparse_index = sparse_index
494
+ self._sparse_values = sparse_values
495
+ self._dtype = SparseDtype(sparse_values.dtype, fill_value)
496
+
497
+ @classmethod
498
+ def _simple_new(
499
+ cls,
500
+ sparse_array: np.ndarray,
501
+ sparse_index: SparseIndex,
502
+ dtype: SparseDtype,
503
+ ) -> Self:
504
+ new = object.__new__(cls)
505
+ new._sparse_index = sparse_index
506
+ new._sparse_values = sparse_array
507
+ new._dtype = dtype
508
+ return new
509
+
510
    @classmethod
    def from_spmatrix(cls, data: spmatrix) -> Self:
        """
        Create a SparseArray from a scipy.sparse matrix.

        Parameters
        ----------
        data : scipy.sparse.sp_matrix
            This should be a SciPy sparse matrix where the size
            of the second dimension is 1. In other words, a
            sparse matrix with a single column.

        Returns
        -------
        SparseArray

        Examples
        --------
        >>> import scipy.sparse
        >>> mat = scipy.sparse.coo_matrix((4, 1))
        >>> pd.arrays.SparseArray.from_spmatrix(mat)
        [0.0, 0.0, 0.0, 0.0]
        Fill: 0.0
        IntIndex
        Indices: array([], dtype=int32)
        """
        length, ncol = data.shape

        if ncol != 1:
            raise ValueError(f"'data' must have a single column, not '{ncol}'")

        # our sparse index classes require that the positions be strictly
        # increasing. So we need to sort loc, and arr accordingly.
        data = data.tocsc()
        data.sort_indices()
        arr = data.data
        idx = data.indices

        # scipy sparse matrices only store nonzero entries, so the fill
        # value is the scalar zero of the matrix's dtype.
        zero = np.array(0, dtype=arr.dtype).item()
        dtype = SparseDtype(arr.dtype, zero)
        index = IntIndex(length, idx)

        # Components are already validated/sorted, so skip __init__.
        return cls._simple_new(arr, index, dtype)
553
+
554
    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        """Densify to a NumPy ndarray, filling the gaps with ``fill_value``.

        NOTE(review): ``copy`` is accepted for the ``__array__`` protocol but
        is not referenced in this body.
        """
        fill_value = self.fill_value

        if self.sp_index.ngaps == 0:
            # No gaps to fill: the stored values ARE the dense array.
            # Compat for na dtype and int values.
            return self.sp_values
        if dtype is None:
            # Can NumPy represent this type?
            # If not, `np.result_type` will raise. We catch that
            # and return object.
            if self.sp_values.dtype.kind == "M":
                # However, we *do* special-case the common case of
                # a datetime64 with pandas NaT.
                if fill_value is NaT:
                    # Can't put pd.NaT in a datetime64[ns]
                    fill_value = np.datetime64("NaT")
            try:
                dtype = np.result_type(self.sp_values.dtype, type(fill_value))
            except TypeError:
                dtype = object

        # Start fully filled, then scatter the stored values into place.
        out = np.full(self.shape, fill_value, dtype=dtype)
        out[self.sp_index.indices] = self.sp_values
        return out
580
+
581
+ def __setitem__(self, key, value) -> None:
582
+ # I suppose we could allow setting of non-fill_value elements.
583
+ # TODO(SparseArray.__setitem__): remove special cases in
584
+ # ExtensionBlock.where
585
+ msg = "SparseArray does not support item assignment via setitem"
586
+ raise TypeError(msg)
587
+
588
    @classmethod
    def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
        # ExtensionArray construction hook; ``copy`` is not forwarded here —
        # the main constructor handles materialization itself.
        return cls(scalars, dtype=dtype)

    @classmethod
    def _from_factorized(cls, values, original):
        # Rebuild from factorized values, preserving the original's dtype
        # (and therefore its fill_value).
        return cls(values, dtype=original.dtype)
595
+
596
    # ------------------------------------------------------------------------
    # Data
    # ------------------------------------------------------------------------
    @property
    def sp_index(self) -> SparseIndex:
        """
        The SparseIndex containing the location of non- ``fill_value`` points.
        """
        return self._sparse_index

    @property
    def sp_values(self) -> np.ndarray:
        """
        An ndarray containing the non- ``fill_value`` values.

        Examples
        --------
        >>> from pandas.arrays import SparseArray
        >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
        >>> s.sp_values
        array([1, 2])
        """
        return self._sparse_values

    @property
    def dtype(self) -> SparseDtype:
        # SparseDtype bundles the storage subtype with the fill_value.
        return self._dtype
623
+
624
    @property
    def fill_value(self):
        """
        Elements in `data` that are `fill_value` are not stored.

        For memory savings, this should be the most common value in the array.

        Examples
        --------
        >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
        >>> ser.sparse.fill_value
        0
        >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2)
        >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype)
        >>> ser.sparse.fill_value
        2
        """
        return self.dtype.fill_value

    @fill_value.setter
    def fill_value(self, value) -> None:
        # Only swaps the dtype; sp_values/sp_index are untouched, so the
        # value the array *represents* at the gap positions changes.
        self._dtype = SparseDtype(self.dtype.subtype, value)
646
+
647
    @property
    def kind(self) -> SparseIndexKind:
        """
        The kind of sparse index for this array. One of {'integer', 'block'}.
        """
        if isinstance(self.sp_index, IntIndex):
            return "integer"
        else:
            return "block"

    @property
    def _valid_sp_values(self) -> np.ndarray:
        # Stored (non-fill) values with NA entries dropped.
        sp_vals = self.sp_values
        mask = notna(sp_vals)
        return sp_vals[mask]

    def __len__(self) -> int:
        # Logical (dense) length, not the number of stored points.
        return self.sp_index.length

    @property
    def _null_fill_value(self) -> bool:
        # True when fill_value is NA-like (e.g. NaN/NaT).
        return self._dtype._is_na_fill_value

    def _fill_value_matches(self, fill_value) -> bool:
        # NA fill values must be compared via isna, since NaN != NaN.
        if self._null_fill_value:
            return isna(fill_value)
        else:
            return self.fill_value == fill_value

    @property
    def nbytes(self) -> int:
        # Memory of the stored values plus the index; gaps cost nothing.
        return self.sp_values.nbytes + self.sp_index.nbytes
679
+
680
    @property
    def density(self) -> float:
        """
        The percent of non- ``fill_value`` points, as decimal.

        Examples
        --------
        >>> from pandas.arrays import SparseArray
        >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
        >>> s.density
        0.6
        """
        # npoints / length; 1.0 means no gaps at all.
        return self.sp_index.npoints / self.sp_index.length

    @property
    def npoints(self) -> int:
        """
        The number of non- ``fill_value`` points.

        Examples
        --------
        >>> from pandas.arrays import SparseArray
        >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
        >>> s.npoints
        3
        """
        return self.sp_index.npoints
707
+
708
    # error: Return type "SparseArray" of "isna" incompatible with return type
    # "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray"
    def isna(self) -> Self: # type: ignore[override]
        """Boolean SparseArray marking NA positions."""
        # If null fill value, we want SparseDtype[bool, true]
        # to preserve the same memory usage.
        dtype = SparseDtype(bool, self._null_fill_value)
        if self._null_fill_value:
            # Gaps are NA by definition, so only the stored values need
            # checking; the result shares this array's sparse index.
            return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)
        # Non-NA fill value: gaps are definitely not NA, so build a dense
        # mask and mark only stored values that happen to be NA.
        mask = np.full(len(self), False, dtype=np.bool_)
        mask[self.sp_index.indices] = isna(self.sp_values)
        return type(self)(mask, fill_value=False, dtype=dtype)
719
+
720
    def _pad_or_backfill( # pylint: disable=useless-parent-delegation
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        """Delegate pad/backfill to the base ExtensionArray implementation."""
        # TODO(3.0): We can remove this method once deprecation for fillna method
        # keyword is enforced.
        return super()._pad_or_backfill(
            method=method, limit=limit, limit_area=limit_area, copy=copy
        )
733
+
734
    def fillna(
        self,
        value=None,
        method: FillnaOptions | None = None,
        limit: int | None = None,
        copy: bool = True,
    ) -> Self:
        """
        Fill missing values with `value`.

        Parameters
        ----------
        value : scalar, optional
        method : str, optional

            .. warning::

               Using 'method' will result in high memory use,
               as all `fill_value` methods will be converted to
               an in-memory ndarray

        limit : int, optional

        copy: bool, default True
            Ignored for SparseArray.

        Returns
        -------
        SparseArray

        Notes
        -----
        When `value` is specified, the result's ``fill_value`` depends on
        ``self.fill_value``. The goal is to maintain low-memory use.

        If ``self.fill_value`` is NA, the result dtype will be
        ``SparseDtype(self.dtype, fill_value=value)``. This will preserve
        amount of memory used before and after filling.

        When ``self.fill_value`` is not NA, the result dtype will be
        ``self.dtype``. Again, this preserves the amount of memory used.
        """
        # Exactly one of 'value'/'method' must be given.
        if (method is None and value is None) or (
            method is not None and value is not None
        ):
            raise ValueError("Must specify one of 'method' or 'value'.")

        if method is not None:
            # Method-based filling densifies; handled by the base class.
            return super().fillna(method=method, limit=limit)

        else:
            # Replace NA among the *stored* values only.
            new_values = np.where(isna(self.sp_values), value, self.sp_values)

            if self._null_fill_value:
                # This is essentially just updating the dtype.
                new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
            else:
                new_dtype = self.dtype

            return self._simple_new(new_values, self._sparse_index, new_dtype)
794
+
795
    def shift(self, periods: int = 1, fill_value=None) -> Self:
        """Shift values by ``periods`` positions, padding with ``fill_value``.

        Implemented by concatenating a run of ``fill_value`` with the
        appropriately truncated original.
        """
        if not len(self) or periods == 0:
            return self.copy()

        if isna(fill_value):
            fill_value = self.dtype.na_value

        # Up-cast the subtype first if fill_value doesn't fit in it.
        subtype = np.result_type(fill_value, self.dtype.subtype)

        if subtype != self.dtype.subtype:
            # just coerce up front
            arr = self.astype(SparseDtype(subtype, self.fill_value))
        else:
            arr = self

        # Padding segment, clipped to the array length for large |periods|.
        empty = self._from_sequence(
            [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype
        )

        if periods > 0:
            a = empty
            b = arr[:-periods]
        else:
            a = arr[abs(periods) :]
            b = empty
        return arr._concat_same_type([a, b])
821
+
822
    def _first_fill_value_loc(self):
        """
        Get the location of the first fill value.

        Returns
        -------
        int
            Position of the first gap, or -1 when the array has no gaps
            (or is empty).
        """
        if len(self) == 0 or self.sp_index.npoints == len(self):
            return -1

        indices = self.sp_index.indices
        if not len(indices) or indices[0] > 0:
            # Either everything is a gap, or the array starts with one.
            return 0

        # a number larger than 1 should be appended to
        # the last in case of fill value only appears
        # in the tail of array
        diff = np.r_[np.diff(indices), 2]
        # First place where consecutive stored indices jump by more than 1
        # is immediately followed by a gap.
        return indices[(diff > 1).argmax()] + 1
842
+
843
    @doc(ExtensionArray.duplicated)
    def duplicated(
        self, keep: Literal["first", "last", False] = "first"
    ) -> npt.NDArray[np.bool_]:
        # Densify, then defer to the shared duplicated algorithm with an
        # NA mask so missing values are handled consistently.
        values = np.asarray(self)
        mask = np.asarray(self.isna())
        return algos.duplicated(values, keep=keep, mask=mask)
850
+
851
    def unique(self) -> Self:
        """Unique values, in order of first appearance (fill_value included)."""
        uniques = algos.unique(self.sp_values)
        if len(self.sp_values) != len(self):
            # There is at least one gap, so fill_value occurs in the array
            # and must be inserted at its first-appearance position.
            fill_loc = self._first_fill_value_loc()
            # Inorder to align the behavior of pd.unique or
            # pd.Series.unique, we should keep the original
            # order, here we use unique again to find the
            # insertion place. Since the length of sp_values
            # is not large, maybe minor performance hurt
            # is worthwhile to the correctness.
            insert_loc = len(algos.unique(self.sp_values[:fill_loc]))
            uniques = np.insert(uniques, insert_loc, self.fill_value)
        return type(self)._from_sequence(uniques, dtype=self.dtype)
864
+
865
    def _values_for_factorize(self):
        # Still override this for hash_pandas_object
        return np.asarray(self), self.fill_value

    def factorize(
        self,
        use_na_sentinel: bool = True,
    ) -> tuple[np.ndarray, SparseArray]:
        """Encode as integer codes plus sparse uniques (densifies first)."""
        # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
        # The sparsity on this is backwards from what Sparse would want. Want
        # ExtensionArray.factorize -> Tuple[EA, EA]
        # Given that we have to return a dense array of codes, why bother
        # implementing an efficient factorize?
        codes, uniques = algos.factorize(
            np.asarray(self), use_na_sentinel=use_na_sentinel
        )
        uniques_sp = SparseArray(uniques, dtype=self.dtype)
        return codes, uniques_sp
883
+
884
    def value_counts(self, dropna: bool = True) -> Series:
        """
        Returns a Series containing counts of unique values.

        Parameters
        ----------
        dropna : bool, default True
            Don't include counts of NaN, even if NaN is in sp_values.

        Returns
        -------
        counts : Series
        """
        from pandas import (
            Index,
            Series,
        )

        # Count only the stored values first ...
        keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)
        # ... then fold in the gaps, which all represent fill_value.
        fcounts = self.sp_index.ngaps
        if fcounts > 0 and (not self._null_fill_value or not dropna):
            mask = isna(keys) if self._null_fill_value else keys == self.fill_value
            if mask.any():
                # fill_value already appears among the stored values.
                counts[mask] += fcounts
            else:
                # error: Argument 1 to "insert" has incompatible type "Union[
                # ExtensionArray,ndarray[Any, Any]]"; expected "Union[
                # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype
                # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]],
                # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence
                # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
                keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type]
                counts = np.insert(counts, 0, fcounts)

        if not isinstance(keys, ABCIndex):
            index = Index(keys)
        else:
            index = keys
        return Series(counts, index=index, copy=False)
923
+
924
    # --------
    # Indexing
    # --------
    @overload
    def __getitem__(self, key: ScalarIndexer) -> Any:
        ...

    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | tuple[int | ellipsis, ...],
    ) -> Self:
        ...

    def __getitem__(
        self,
        key: PositionalIndexer | tuple[int | ellipsis, ...],
    ) -> Self | Any:
        """Scalar, slice, mask, and fancy indexing for SparseArray."""
        if isinstance(key, tuple):
            key = unpack_tuple_and_ellipses(key)
            if key is Ellipsis:
                raise ValueError("Cannot slice with Ellipsis")

        if is_integer(key):
            return self._get_val_at(key)
        elif isinstance(key, tuple):
            # error: Invalid index type "Tuple[Union[int, ellipsis], ...]"
            # for "ndarray[Any, Any]"; expected type
            # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_,
            # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[
            # Union[bool_, integer[Any]]]]], _NestedSequence[Union[
            # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[
            # dtype[Union[bool_, integer[Any]]]], _NestedSequence[
            # _SupportsArray[dtype[Union[bool_, integer[Any]]]]],
            # _NestedSequence[Union[bool, int]]], ...]]"
            data_slice = self.to_dense()[key] # type: ignore[index]
        elif isinstance(key, slice):
            # Avoid densifying when handling contiguous slices
            if key.step is None or key.step == 1:
                start = 0 if key.start is None else key.start
                if start < 0:
                    start += len(self)

                end = len(self) if key.stop is None else key.stop
                if end < 0:
                    end += len(self)

                # Keep only stored points that fall within [start, end).
                indices = self.sp_index.indices
                keep_inds = np.flatnonzero((indices >= start) & (indices < end))
                sp_vals = self.sp_values[keep_inds]

                sp_index = indices[keep_inds].copy()

                # If we've sliced to not include the start of the array, all our indices
                # should be shifted. NB: here we are careful to also not shift by a
                # negative value for a case like [0, 1][-100:] where the start index
                # should be treated like 0
                if start > 0:
                    sp_index -= start

                # Length of our result should match applying this slice to a range
                # of the length of our original array
                new_len = len(range(len(self))[key])
                new_sp_index = make_sparse_index(new_len, sp_index, self.kind)
                return type(self)._simple_new(sp_vals, new_sp_index, self.dtype)
            else:
                # Stepped slice: fall back to positional take.
                indices = np.arange(len(self), dtype=np.int32)[key]
                return self.take(indices)

        elif not is_list_like(key):
            # e.g. "foo" or 2.5
            # exception message copied from numpy
            raise IndexError(
                r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
                r"(`None`) and integer or boolean arrays are valid indices"
            )

        else:
            if isinstance(key, SparseArray):
                # NOTE: If we guarantee that SparseDType(bool)
                # has only fill_value - true, false or nan
                # (see GH PR 44955)
                # we can apply mask very fast:
                if is_bool_dtype(key):
                    if isna(key.fill_value):
                        # NA gaps are treated as not-selected.
                        return self.take(key.sp_index.indices[key.sp_values])
                    if not key.fill_value:
                        # fill_value False: only the stored points select.
                        return self.take(key.sp_index.indices)
                    # fill_value True: everything EXCEPT the stored points.
                    n = len(self)
                    mask = np.full(n, True, dtype=np.bool_)
                    mask[key.sp_index.indices] = False
                    return self.take(np.arange(n)[mask])
                else:
                    key = np.asarray(key)

            key = check_array_indexer(self, key)

            if com.is_bool_indexer(key):
                # mypy doesn't know we have an array here
                key = cast(np.ndarray, key)
                return self.take(np.arange(len(key), dtype=np.int32)[key])
            elif hasattr(key, "__len__"):
                return self.take(key)
            else:
                raise ValueError(f"Cannot slice with '{key}'")

        # Only reached by the tuple branch above.
        return type(self)(data_slice, kind=self.kind)
1031
+
1032
    def _get_val_at(self, loc):
        """Scalar lookup at dense position ``loc`` (negative values allowed)."""
        loc = validate_insert_loc(loc, len(self))

        sp_loc = self.sp_index.lookup(loc)
        if sp_loc == -1:
            # Position is a gap: it represents fill_value.
            return self.fill_value
        else:
            val = self.sp_values[sp_loc]
            # Box raw datetime64/timedelta64 scalars into Timestamp/Timedelta.
            val = maybe_box_datetimelike(val, self.sp_values.dtype)
            return val
1042
+
1043
    def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self:
        """Take elements at the given positions (ExtensionArray.take contract)."""
        if is_scalar(indices):
            raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
        indices = np.asarray(indices, dtype=np.int32)

        dtype = None
        if indices.size == 0:
            # Empty take: keep our dtype explicitly since there are no
            # values to infer it from.
            result = np.array([], dtype="object")
            dtype = self.dtype
        elif allow_fill:
            result = self._take_with_fill(indices, fill_value=fill_value)
        else:
            # Fast path stays sparse end-to-end.
            return self._take_without_fill(indices)

        return type(self)(
            result, fill_value=self.fill_value, kind=self.kind, dtype=dtype
        )
1060
+
1061
    def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
        """Dense take where -1 entries in ``indices`` select ``fill_value``.

        Returns an ndarray; the caller re-wraps it as a SparseArray.
        """
        if fill_value is None:
            fill_value = self.dtype.na_value

        if indices.min() < -1:
            raise ValueError(
                "Invalid value in 'indices'. Must be between -1 "
                "and the length of the array."
            )

        if indices.max() >= len(self):
            raise IndexError("out of bounds value in 'indices'.")

        if len(self) == 0:
            # Empty... Allow taking only if all empty
            if (indices == -1).all():
                dtype = np.result_type(self.sp_values, type(fill_value))
                taken = np.empty_like(indices, dtype=dtype)
                taken.fill(fill_value)
                return taken
            else:
                raise IndexError("cannot do a non-empty take from an empty axes.")

        # sp_indexer may be -1 for two reasons
        # 1.) we took for an index of -1 (new)
        # 2.) we took a value that was self.fill_value (old)
        sp_indexer = self.sp_index.lookup_array(indices)
        new_fill_indices = indices == -1
        old_fill_indices = (sp_indexer == -1) & ~new_fill_indices

        if self.sp_index.npoints == 0 and old_fill_indices.all():
            # We've looked up all valid points on an all-sparse array.
            taken = np.full(
                sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype
            )

        elif self.sp_index.npoints == 0:
            # Use the old fill_value unless we took for an index of -1
            _dtype = np.result_type(self.dtype.subtype, type(fill_value))
            taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)
            taken[old_fill_indices] = self.fill_value
        else:
            taken = self.sp_values.take(sp_indexer)

            # Fill in two steps.
            # Old fill values
            # New fill values
            # potentially coercing to a new dtype at each stage.

            m0 = sp_indexer[old_fill_indices] < 0
            m1 = sp_indexer[new_fill_indices] < 0

            result_type = taken.dtype

            if m0.any():
                # Coerce so self.fill_value fits, then patch old-fill slots.
                result_type = np.result_type(result_type, type(self.fill_value))
                taken = taken.astype(result_type)
                taken[old_fill_indices] = self.fill_value

            if m1.any():
                # Coerce so the caller's fill_value fits, then patch -1 slots.
                result_type = np.result_type(result_type, type(fill_value))
                taken = taken.astype(result_type)
                taken[new_fill_indices] = fill_value

        return taken
1126
+
1127
    def _take_without_fill(self, indices) -> Self:
        """Positional take (negative indices wrap) staying fully sparse."""
        to_shift = indices < 0

        n = len(self)

        if (indices.max() >= n) or (indices.min() < -n):
            if n == 0:
                raise IndexError("cannot do a non-empty take from an empty axes.")
            raise IndexError("out of bounds value in 'indices'.")

        if to_shift.any():
            # Normalize negative positions; copy to avoid mutating input.
            indices = indices.copy()
            indices[to_shift] += n

        sp_indexer = self.sp_index.lookup_array(indices)
        # -1 entries mean the requested position was a gap (fill_value).
        value_mask = sp_indexer != -1
        new_sp_values = self.sp_values[sp_indexer[value_mask]]

        value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False)

        new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind)
        return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype)
1149
+
1150
    def searchsorted(
        self,
        v: ArrayLike | object,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        """Densify and defer to ndarray.searchsorted (warns: high memory)."""
        msg = "searchsorted requires high memory usage."
        warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
        v = np.asarray(v)
        return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)
1160
+
1161
+ def copy(self) -> Self:
1162
+ values = self.sp_values.copy()
1163
+ return self._simple_new(values, self.sp_index, self.dtype)
1164
+
1165
    @classmethod
    def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
        """Concatenate SparseArrays; the first array's fill_value wins."""
        fill_value = to_concat[0].fill_value

        values = []
        length = 0

        if to_concat:
            sp_kind = to_concat[0].kind
        else:
            sp_kind = "integer"

        sp_index: SparseIndex
        if sp_kind == "integer":
            indices = []

            for arr in to_concat:
                # Offset each array's indices by the running total length.
                int_idx = arr.sp_index.indices.copy()
                int_idx += length # TODO: wraparound
                length += arr.sp_index.length

                values.append(arr.sp_values)
                indices.append(int_idx)

            data = np.concatenate(values)
            indices_arr = np.concatenate(indices)
            # error: Argument 2 to "IntIndex" has incompatible type
            # "ndarray[Any, dtype[signedinteger[_32Bit]]]";
            # expected "Sequence[int]"
            sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type]

        else:
            # when concatenating block indices, we don't claim that you'll
            # get an identical index as concatenating the values and then
            # creating a new index. We don't want to spend the time trying
            # to merge blocks across arrays in `to_concat`, so the resulting
            # BlockIndex may have more blocks.
            blengths = []
            blocs = []

            for arr in to_concat:
                block_idx = arr.sp_index.to_block_index()

                values.append(arr.sp_values)
                blocs.append(block_idx.blocs.copy() + length)
                blengths.append(block_idx.blengths)
                length += arr.sp_index.length

            data = np.concatenate(values)
            blocs_arr = np.concatenate(blocs)
            blengths_arr = np.concatenate(blengths)

            sp_index = BlockIndex(length, blocs_arr, blengths_arr)

        return cls(data, sparse_index=sp_index, fill_value=fill_value)
1220
+
1221
    def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
        """
        Change the dtype of a SparseArray.

        The output will always be a SparseArray. To convert to a dense
        ndarray with a certain dtype, use :meth:`numpy.asarray`.

        Parameters
        ----------
        dtype : np.dtype or ExtensionDtype
            For SparseDtype, this changes the dtype of
            ``self.sp_values`` and the ``self.fill_value``.

            For other dtypes, this only changes the dtype of
            ``self.sp_values``.

        copy : bool, default True
            Whether to ensure a copy is made, even if not necessary.

        Returns
        -------
        SparseArray

        Examples
        --------
        >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
        >>> arr
        [0, 0, 1, 2]
        Fill: 0
        IntIndex
        Indices: array([2, 3], dtype=int32)

        >>> arr.astype(SparseDtype(np.dtype('int32')))
        [0, 0, 1, 2]
        Fill: 0
        IntIndex
        Indices: array([2, 3], dtype=int32)

        Using a NumPy dtype with a different kind (e.g. float) will coerce
        just ``self.sp_values``.

        >>> arr.astype(SparseDtype(np.dtype('float64')))
        ... # doctest: +NORMALIZE_WHITESPACE
        [nan, nan, 1.0, 2.0]
        Fill: nan
        IntIndex
        Indices: array([2, 3], dtype=int32)

        Using a SparseDtype, you can also change the fill value as well.

        >>> arr.astype(SparseDtype("float64", fill_value=0.0))
        ... # doctest: +NORMALIZE_WHITESPACE
        [0.0, 0.0, 1.0, 2.0]
        Fill: 0.0
        IntIndex
        Indices: array([2, 3], dtype=int32)
        """
        if dtype == self._dtype:
            # No-op cast: return self or a copy depending on `copy`.
            if not copy:
                return self
            else:
                return self.copy()

        future_dtype = pandas_dtype(dtype)
        if not isinstance(future_dtype, SparseDtype):
            # GH#34457
            # Non-sparse target: densify and cast.
            values = np.asarray(self)
            values = ensure_wrapped_if_datetimelike(values)
            return astype_array(values, dtype=future_dtype, copy=False)

        # Sparse target: cast only the stored values, keep the index.
        dtype = self.dtype.update_dtype(dtype)
        subtype = pandas_dtype(dtype._subtype_with_str)
        subtype = cast(np.dtype, subtype) # ensured by update_dtype
        values = ensure_wrapped_if_datetimelike(self.sp_values)
        sp_values = astype_array(values, subtype, copy=copy)
        sp_values = np.asarray(sp_values)

        return self._simple_new(sp_values, self.sp_index, dtype)
1299
+
1300
    def map(self, mapper, na_action=None) -> Self:
        """
        Map categories using an input mapping or function.

        Parameters
        ----------
        mapper : dict, Series, callable
            The correspondence from old values to new.
        na_action : {None, 'ignore'}, default None
            If 'ignore', propagate NA values, without passing them to the
            mapping correspondence.

        Returns
        -------
        SparseArray
            The output array will have the same density as the input.
            The output fill value will be the result of applying the
            mapping to ``self.fill_value``

        Examples
        --------
        >>> arr = pd.arrays.SparseArray([0, 1, 2])
        >>> arr.map(lambda x: x + 10)
        [10, 11, 12]
        Fill: 10
        IntIndex
        Indices: array([1, 2], dtype=int32)

        >>> arr.map({0: 10, 1: 11, 2: 12})
        [10, 11, 12]
        Fill: 10
        IntIndex
        Indices: array([1, 2], dtype=int32)

        >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
        [10, 11, 12]
        Fill: 10
        IntIndex
        Indices: array([1, 2], dtype=int32)
        """
        is_map = isinstance(mapper, (abc.Mapping, ABCSeries))

        fill_val = self.fill_value

        # Map the fill value itself unless na_action='ignore' and it is NA.
        if na_action is None or notna(fill_val):
            fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)

        def func(sp_val):
            new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)
            # check identity and equality because nans are not equal to each other
            if new_sp_val is fill_val or new_sp_val == fill_val:
                # A stored value mapping onto the fill value would break the
                # sparsity invariant, so reject it.
                msg = "fill value in the sparse values not supported"
                raise ValueError(msg)
            return new_sp_val

        sp_values = [func(x) for x in self.sp_values]

        return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)
1358
+
1359
    def to_dense(self) -> np.ndarray:
        """
        Convert SparseArray to a NumPy array.

        Returns
        -------
        arr : NumPy array
        """
        # Request the subtype dtype so __array__ doesn't have to infer one.
        return np.asarray(self, dtype=self.sp_values.dtype)

    def _where(self, mask, value):
        # NB: may not preserve dtype, e.g. result may be Sparse[float64]
        # while self is Sparse[int64]
        # Simple densify-then-resparsify implementation of where().
        naive_implementation = np.where(mask, self, value)
        dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value)
        result = type(self)._from_sequence(naive_implementation, dtype=dtype)
        return result
1376
+
1377
    # ------------------------------------------------------------------------
    # IO
    # ------------------------------------------------------------------------
    def __setstate__(self, state) -> None:
        """Necessary for making this object picklable"""
        if isinstance(state, tuple):
            # Compat for pandas < 0.24.0
            nd_state, (fill_value, sp_index) = state
            # Rehydrate the ndarray from its own pickle state.
            sparse_values = np.array([])
            sparse_values.__setstate__(nd_state)

            self._sparse_values = sparse_values
            self._sparse_index = sp_index
            self._dtype = SparseDtype(sparse_values.dtype, fill_value)
        else:
            # Modern pickles store the attribute dict directly.
            self.__dict__.update(state)

    def nonzero(self) -> tuple[npt.NDArray[np.int32]]:
        # With a zero fill value every stored point is nonzero by
        # construction; otherwise filter out stored zeros.
        if self.fill_value == 0:
            return (self.sp_index.indices,)
        else:
            return (self.sp_index.indices[self.sp_values != 0],)
1399
+
1400
    # ------------------------------------------------------------------------
    # Reductions
    # ------------------------------------------------------------------------

    def _reduce(
        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
    ):
        """Dispatch a named reduction (e.g. 'sum', 'mean') to this array."""
        method = getattr(self, name, None)

        if method is None:
            raise TypeError(f"cannot perform {name} with type {self.dtype}")

        # NOTE(review): self is passed through unchanged when skipna=True and
        # dropna() is applied only when skipna=False — presumably the
        # individual reductions skip NAs themselves; confirm against them.
        if skipna:
            arr = self
        else:
            arr = self.dropna()

        result = getattr(arr, name)(**kwargs)

        if keepdims:
            # Wrap the scalar back into a length-1 array.
            return type(self)([result], dtype=self.dtype)
        else:
            return result
1423
+
1424
    def all(self, axis=None, *args, **kwargs):
        """
        Tests whether all elements evaluate True

        Returns
        -------
        all : bool

        See Also
        --------
        numpy.all
        """
        nv.validate_all(args, kwargs)

        values = self.sp_values

        # Any gap means fill_value occurs; if it is falsy, the answer is False.
        if len(values) != len(self) and not np.all(self.fill_value):
            return False

        # NOTE(review): returns np.bool_ here, while `any` calls `.item()` —
        # apparent inconsistency, left as-is to preserve behavior.
        return values.all()
1444
+
1445
    def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool:
        """
        Tests whether at least one of elements evaluate True

        Returns
        -------
        any : bool

        See Also
        --------
        numpy.any
        """
        nv.validate_any(args, kwargs)

        values = self.sp_values

        # Any gap means fill_value occurs; if it is truthy, the answer is True.
        if len(values) != len(self) and np.any(self.fill_value):
            return True

        # .item() converts np.bool_ to a Python bool.
        return values.any().item()
1465
+
1466
    def sum(
        self,
        axis: AxisInt = 0,
        min_count: int = 0,
        skipna: bool = True,
        *args,
        **kwargs,
    ) -> Scalar:
        """
        Sum of non-NA/null values

        Parameters
        ----------
        axis : int, default 0
            Not Used. NumPy compatibility.
        min_count : int, default 0
            The required number of valid values to perform the summation. If fewer
            than ``min_count`` valid values are present, the result will be the missing
            value indicator for subarray type.
        *args, **kwargs
            Not Used. NumPy compatibility.

        Returns
        -------
        scalar
        """
        nv.validate_sum(args, kwargs)
        valid_vals = self._valid_sp_values
        sp_sum = valid_vals.sum()
        # NAs can hide in the gaps only when fill_value itself is not NA-like.
        has_na = self.sp_index.ngaps > 0 and not self._null_fill_value

        if has_na and not skipna:
            return na_value_for_dtype(self.dtype.subtype, compat=False)

        if self._null_fill_value:
            # Gaps are NA and contribute nothing to the sum.
            if check_below_min_count(valid_vals.shape, None, min_count):
                return na_value_for_dtype(self.dtype.subtype, compat=False)
            return sp_sum
        else:
            # Each gap contributes fill_value; also count gaps toward min_count.
            nsparse = self.sp_index.ngaps
            if check_below_min_count(valid_vals.shape, None, min_count - nsparse):
                return na_value_for_dtype(self.dtype.subtype, compat=False)
            return sp_sum + self.fill_value * nsparse
1509
+
1510
+ def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray:
1511
+ """
1512
+ Cumulative sum of non-NA/null values.
1513
+
1514
+ When performing the cumulative summation, any non-NA/null values will
1515
+ be skipped. The resulting SparseArray will preserve the locations of
1516
+ NaN values, but the fill value will be `np.nan` regardless.
1517
+
1518
+ Parameters
1519
+ ----------
1520
+ axis : int or None
1521
+ Axis over which to perform the cumulative summation. If None,
1522
+ perform cumulative summation over flattened array.
1523
+
1524
+ Returns
1525
+ -------
1526
+ cumsum : SparseArray
1527
+ """
1528
+ nv.validate_cumsum(args, kwargs)
1529
+
1530
+ if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
1531
+ raise ValueError(f"axis(={axis}) out of bounds")
1532
+
1533
+ if not self._null_fill_value:
1534
+ return SparseArray(self.to_dense()).cumsum()
1535
+
1536
+ return SparseArray(
1537
+ self.sp_values.cumsum(),
1538
+ sparse_index=self.sp_index,
1539
+ fill_value=self.fill_value,
1540
+ )
1541
+
1542
+ def mean(self, axis: Axis = 0, *args, **kwargs):
1543
+ """
1544
+ Mean of non-NA/null values
1545
+
1546
+ Returns
1547
+ -------
1548
+ mean : float
1549
+ """
1550
+ nv.validate_mean(args, kwargs)
1551
+ valid_vals = self._valid_sp_values
1552
+ sp_sum = valid_vals.sum()
1553
+ ct = len(valid_vals)
1554
+
1555
+ if self._null_fill_value:
1556
+ return sp_sum / ct
1557
+ else:
1558
+ nsparse = self.sp_index.ngaps
1559
+ return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
1560
+
1561
    def max(self, *, axis: AxisInt | None = None, skipna: bool = True):
        """
        Max of array values, ignoring NA values if specified.

        Parameters
        ----------
        axis : int or None, default None
            Not Used. NumPy compatibility.
        skipna : bool, default True
            Whether to ignore NA values.

        Returns
        -------
        scalar
        """
        # Validates axis against ndim for NumPy compatibility, then defers
        # to the shared fill-value-aware min/max helper.
        nv.validate_minmax_axis(axis, self.ndim)
        return self._min_max("max", skipna=skipna)
1578
+
1579
    def min(self, *, axis: AxisInt | None = None, skipna: bool = True):
        """
        Min of array values, ignoring NA values if specified.

        Parameters
        ----------
        axis : int or None, default None
            Not Used. NumPy compatibility.
        skipna : bool, default True
            Whether to ignore NA values.

        Returns
        -------
        scalar
        """
        # Validates axis against ndim for NumPy compatibility, then defers
        # to the shared fill-value-aware min/max helper.
        nv.validate_minmax_axis(axis, self.ndim)
        return self._min_max("min", skipna=skipna)
1596
+
1597
    def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar:
        """
        Min/max of non-NA/null values

        Parameters
        ----------
        kind : {"min", "max"}
        skipna : bool

        Returns
        -------
        scalar
        """
        valid_vals = self._valid_sp_values
        # Gaps holding a non-NA fill value are real data and may themselves
        # be the extremum.
        has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0

        if len(valid_vals) > 0:
            sp_min_max = getattr(valid_vals, kind)()

            # If a non-null fill value is currently present, it might be the min/max
            if has_nonnull_fill_vals:
                func = max if kind == "max" else min
                return func(sp_min_max, self.fill_value)
            elif skipna:
                return sp_min_max
            elif self.sp_index.ngaps == 0:
                # No NAs present
                return sp_min_max
            else:
                # skipna=False with NA gaps present: propagate NA.
                return na_value_for_dtype(self.dtype.subtype, compat=False)
        elif has_nonnull_fill_vals:
            # No stored valid values, but the gaps hold a usable fill value.
            return self.fill_value
        else:
            # Nothing but NAs.
            return na_value_for_dtype(self.dtype.subtype, compat=False)
1631
+
1632
    def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int:
        # Find the extremum among the physically stored values, then decide
        # whether a gap position (holding fill_value) beats it.
        values = self._sparse_values
        index = self._sparse_index.indices
        mask = np.asarray(isna(values))
        func = np.argmax if kind == "argmax" else np.argmin

        idx = np.arange(values.shape[0])
        non_nans = values[~mask]
        non_nan_idx = idx[~mask]

        # Position of the stored extremum within sp_values, then mapped back
        # to its location in the dense array via the sparse index.
        _candidate = non_nan_idx[func(non_nans)]
        candidate = index[_candidate]

        if isna(self.fill_value):
            # An NA fill value can never win; the stored extremum stands.
            return candidate
        if kind == "argmin" and self[candidate] < self.fill_value:
            return candidate
        if kind == "argmax" and self[candidate] > self.fill_value:
            return candidate
        # Otherwise fill_value ties or beats the stored extremum: the answer
        # is the first gap position, provided any gap actually exists.
        _loc = self._first_fill_value_loc()
        if _loc == -1:
            # fill_value doesn't exist
            return candidate
        else:
            return _loc
1657
+
1658
    def argmax(self, skipna: bool = True) -> int:
        """Return the position of the maximum value."""
        validate_bool_kwarg(skipna, "skipna")
        # skipna=False with NAs present is not supported for this array type.
        if not skipna and self._hasna:
            raise NotImplementedError
        return self._argmin_argmax("argmax")
1663
+
1664
    def argmin(self, skipna: bool = True) -> int:
        """Return the position of the minimum value."""
        validate_bool_kwarg(skipna, "skipna")
        # skipna=False with NAs present is not supported for this array type.
        if not skipna and self._hasna:
            raise NotImplementedError
        return self._argmin_argmax("argmin")
1669
+
1670
+ # ------------------------------------------------------------------------
1671
+ # Ufuncs
1672
+ # ------------------------------------------------------------------------
1673
+
1674
    # Operand types handled directly by __array_ufunc__; anything else causes
    # us to return NotImplemented and defer to the other operand.
    _HANDLED_TYPES = (np.ndarray, numbers.Number)

    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        """NumPy ufunc protocol hook, keeping results sparse where possible."""
        out = kwargs.get("out", ())

        for x in inputs + out:
            if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
                return NotImplemented

        # for binary ops, use our custom dunder methods
        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result

        if "out" in kwargs:
            # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace
            res = arraylike.dispatch_ufunc_with_out(
                self, ufunc, method, *inputs, **kwargs
            )
            return res

        if method == "reduce":
            result = arraylike.dispatch_reduction_ufunc(
                self, ufunc, method, *inputs, **kwargs
            )
            if result is not NotImplemented:
                # e.g. tests.series.test_ufunc.TestNumpyReductions
                return result

        if len(inputs) == 1:
            # No alignment necessary.
            # Apply the ufunc separately to the stored values and to the
            # fill value, so the gaps never have to be materialized.
            sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
            fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)

            if ufunc.nout > 1:
                # multiple outputs. e.g. modf
                arrays = tuple(
                    self._simple_new(
                        sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
                    )
                    for sp_value, fv in zip(sp_values, fill_value)
                )
                return arrays
            elif method == "reduce":
                # e.g. reductions
                return sp_values

            return self._simple_new(
                sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
            )

        # Multiple inputs: fall back to a dense computation, then re-wrap.
        new_inputs = tuple(np.asarray(x) for x in inputs)
        result = getattr(ufunc, method)(*new_inputs, **kwargs)
        if out:
            if len(out) == 1:
                out = out[0]
            return out

        if ufunc.nout > 1:
            return tuple(type(self)(x) for x in result)
        elif method == "at":
            # no return value
            return None
        else:
            return type(self)(result)
1741
+
1742
+ # ------------------------------------------------------------------------
1743
+ # Ops
1744
+ # ------------------------------------------------------------------------
1745
+
1746
    def _arith_method(self, other, op):
        """Dispatch arithmetic ops on self, keeping the result sparse."""
        op_name = op.__name__

        if isinstance(other, SparseArray):
            return _sparse_array_op(self, other, op, op_name)

        elif is_scalar(other):
            # Apply the op to fill value and stored values independently;
            # errstate suppresses e.g. divide-by-zero warnings.
            with np.errstate(all="ignore"):
                fill = op(_get_fill(self), np.asarray(other))
                result = op(self.sp_values, other)

            if op_name == "divmod":
                # divmod yields a pair: wrap quotient and remainder separately.
                left, right = result
                lfill, rfill = fill
                return (
                    _wrap_result(op_name, left, self.sp_index, lfill),
                    _wrap_result(op_name, right, self.sp_index, rfill),
                )

            return _wrap_result(op_name, result, self.sp_index, fill)

        else:
            # Array-like: coerce to a SparseArray of matching length and
            # dispatch to the sparse/sparse path.
            other = np.asarray(other)
            with np.errstate(all="ignore"):
                if len(self) != len(other):
                    raise AssertionError(
                        f"length mismatch: {len(self)} vs. {len(other)}"
                    )
                if not isinstance(other, SparseArray):
                    dtype = getattr(other, "dtype", None)
                    other = SparseArray(other, fill_value=self.fill_value, dtype=dtype)
                return _sparse_array_op(self, other, op, op_name)
1778
+
1779
    def _cmp_method(self, other, op) -> SparseArray:
        """Dispatch comparison ops; the result is a boolean SparseArray."""
        if not is_scalar(other) and not isinstance(other, type(self)):
            # convert list-like to ndarray
            other = np.asarray(other)

        if isinstance(other, np.ndarray):
            # TODO: make this more flexible than just ndarray...
            other = SparseArray(other, fill_value=self.fill_value)

        if isinstance(other, SparseArray):
            if len(self) != len(other):
                raise ValueError(
                    f"operands have mismatched length {len(self)} and {len(other)}"
                )

            # e.g. "__eq__" -> "eq" for the sparse-op lookup.
            op_name = op.__name__.strip("_")
            return _sparse_array_op(self, other, op, op_name)
        else:
            # scalar
            # Compare the fill value once, pre-fill a dense boolean result
            # with that answer, then overwrite the stored positions with
            # their elementwise comparisons.
            fill_value = op(self.fill_value, other)
            result = np.full(len(self), fill_value, dtype=np.bool_)
            result[self.sp_index.indices] = op(self.sp_values, other)

            return type(self)(
                result,
                fill_value=fill_value,
                dtype=np.bool_,
            )

    # Logical ops (&, |, ^) reuse the comparison dispatch path above.
    _logical_method = _cmp_method
1809
+
1810
+ def _unary_method(self, op) -> SparseArray:
1811
+ fill_value = op(np.array(self.fill_value)).item()
1812
+ dtype = SparseDtype(self.dtype.subtype, fill_value)
1813
+ # NOTE: if fill_value doesn't change
1814
+ # we just have to apply op to sp_values
1815
+ if isna(self.fill_value) or fill_value == self.fill_value:
1816
+ values = op(self.sp_values)
1817
+ return type(self)._simple_new(values, self.sp_index, self.dtype)
1818
+ # In the other case we have to recalc indexes
1819
+ return type(self)(op(self.to_dense()), dtype=dtype)
1820
+
1821
    def __pos__(self) -> SparseArray:
        # Unary +, via the shared fill-value-aware helper.
        return self._unary_method(operator.pos)

    def __neg__(self) -> SparseArray:
        # Unary -.
        return self._unary_method(operator.neg)

    def __invert__(self) -> SparseArray:
        # Bitwise/boolean ~.
        return self._unary_method(operator.invert)

    def __abs__(self) -> SparseArray:
        # abs().
        return self._unary_method(operator.abs)
1832
+
1833
+ # ----------
1834
+ # Formatting
1835
+ # -----------
1836
+ def __repr__(self) -> str:
1837
+ pp_str = printing.pprint_thing(self)
1838
+ pp_fill = printing.pprint_thing(self.fill_value)
1839
+ pp_index = printing.pprint_thing(self.sp_index)
1840
+ return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"
1841
+
1842
    def _formatter(self, boxed: bool = False):
        """Return None so the caller picks a formatter from the values' dtype."""
        # Defer to the formatter from the GenericArrayFormatter calling us.
        # This will infer the correct formatter from the dtype of the values.
        return None
1846
+
1847
+
1848
def _make_sparse(
    arr: np.ndarray,
    kind: SparseIndexKind = "block",
    fill_value=None,
    dtype: np.dtype | None = None,
):
    """
    Convert ndarray to sparse format

    Parameters
    ----------
    arr : ndarray
    kind : {'block', 'integer'}
    fill_value : NaN or another value
    dtype : np.dtype, optional

    Returns
    -------
    (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
    """
    assert isinstance(arr, np.ndarray)

    if arr.ndim > 1:
        raise TypeError("expected dimension <= 1 data")

    if fill_value is None:
        fill_value = na_value_for_dtype(arr.dtype)

    # Build a mask of positions to store explicitly (those != fill_value).
    if isna(fill_value):
        mask = notna(arr)
    else:
        # cast to object comparison to be safe
        if is_string_dtype(arr.dtype):
            arr = arr.astype(object)

        if is_object_dtype(arr.dtype):
            # element-wise equality check method in numpy doesn't treat
            # each element type, eg. 0, 0.0, and False are treated as
            # same. So we have to check the both of its type and value.
            mask = splib.make_mask_object_ndarray(arr, fill_value)
        else:
            mask = arr != fill_value

    length = len(arr)
    if length != len(mask):
        # the arr is a SparseArray
        indices = mask.sp_index.indices
    else:
        indices = mask.nonzero()[0].astype(np.int32)

    index = make_sparse_index(length, indices, kind)
    sparsified_values = arr[mask]
    if dtype is not None:
        # Cast stored values through pandas' casting helpers so datetimelike
        # values are wrapped correctly before the astype.
        sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)
        sparsified_values = astype_array(sparsified_values, dtype=dtype)
        sparsified_values = np.asarray(sparsified_values)

    # TODO: copy
    return sparsified_values, index, fill_value
1908
+
1909
+
1910
@overload
def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex:
    ...


@overload
def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex:
    ...


def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex:
    """
    Build a SparseIndex of the requested kind from explicit positions.

    Parameters
    ----------
    length : int
        Total length of the dense array.
    indices : array-like of int
        Positions that hold explicitly stored values.
    kind : {"block", "integer"}

    Returns
    -------
    BlockIndex or IntIndex

    Raises
    ------
    ValueError
        If ``kind`` is neither "block" nor "integer".
    """
    index: SparseIndex
    if kind == "block":
        # Compress runs of consecutive indices into (start, length) blocks.
        locs, lens = splib.get_blocks(indices)
        index = BlockIndex(length, locs, lens)
    elif kind == "integer":
        index = IntIndex(length, indices)
    else:  # pragma: no cover
        raise ValueError("must be block or integer type")
    return index
videollama2/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Interaction with scipy.sparse matrices.
3
+
4
+ Currently only includes to_coo helpers.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from typing import TYPE_CHECKING
9
+
10
+ from pandas._libs import lib
11
+
12
+ from pandas.core.dtypes.missing import notna
13
+
14
+ from pandas.core.algorithms import factorize
15
+ from pandas.core.indexes.api import MultiIndex
16
+ from pandas.core.series import Series
17
+
18
+ if TYPE_CHECKING:
19
+ from collections.abc import Iterable
20
+
21
+ import numpy as np
22
+ import scipy.sparse
23
+
24
+ from pandas._typing import (
25
+ IndexLabel,
26
+ npt,
27
+ )
28
+
29
+
30
+ def _check_is_partition(parts: Iterable, whole: Iterable):
31
+ whole = set(whole)
32
+ parts = [set(x) for x in parts]
33
+ if set.intersection(*parts) != set():
34
+ raise ValueError("Is not a partition because intersection is not null.")
35
+ if set.union(*parts) != whole:
36
+ raise ValueError("Is not a partition because union is not the whole.")
37
+
38
+
39
def _levels_to_axis(
    ss,
    levels: tuple[int] | list[int],
    valid_ilocs: npt.NDArray[np.intp],
    sort_labels: bool = False,
) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:
    """
    For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
    where `ax_coords` are the coordinates along one of the two axes of the
    destination sparse matrix, and `ax_labels` are the labels from `ss`' Index
    which correspond to these coordinates.

    Parameters
    ----------
    ss : Series
    levels : tuple/list
    valid_ilocs : numpy.ndarray
        Array of integer positions of valid values for the sparse matrix in ss.
    sort_labels : bool, default False
        Sort the axis labels before forming the sparse matrix. When `levels`
        refers to a single level, set to True for a faster execution.

    Returns
    -------
    ax_coords : numpy.ndarray (axis coordinates)
    ax_labels : list (axis labels)
    """
    # Since the labels are sorted in `Index.levels`, when we wish to sort and
    # there is only one level of the MultiIndex for this axis, the desired
    # output can be obtained in the following simpler, more efficient way.
    if sort_labels and len(levels) == 1:
        ax_coords = ss.index.codes[levels[0]][valid_ilocs]
        ax_labels = ss.index.levels[levels[0]]

    else:
        # General case: zip the chosen level values into tuples and factorize
        # them, so each distinct tuple becomes one coordinate on this axis.
        levels_values = lib.fast_zip(
            [ss.index.get_level_values(lvl).to_numpy() for lvl in levels]
        )
        codes, ax_labels = factorize(levels_values, sort=sort_labels)
        ax_coords = codes[valid_ilocs]

    ax_labels = ax_labels.tolist()
    return ax_coords, ax_labels
82
+
83
+
84
def _to_ijv(
    ss,
    row_levels: tuple[int] | list[int] = (0,),
    column_levels: tuple[int] | list[int] = (1,),
    sort_labels: bool = False,
) -> tuple[
    np.ndarray,
    npt.NDArray[np.intp],
    npt.NDArray[np.intp],
    list[IndexLabel],
    list[IndexLabel],
]:
    """
    For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels,
    jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo
    constructor, and ilabels and jlabels are the row and column labels
    respectively.

    Parameters
    ----------
    ss : Series
    row_levels : tuple/list
    column_levels : tuple/list
    sort_labels : bool, default False
        Sort the row and column labels before forming the sparse matrix.
        When `row_levels` and/or `column_levels` refer to a single level,
        set to `True` for a faster execution.

    Returns
    -------
    values : numpy.ndarray
        Valid values to populate a sparse matrix, extracted from
        ss.
    i_coords : numpy.ndarray (row coordinates of the values)
    j_coords : numpy.ndarray (column coordinates of the values)
    i_labels : list (row labels)
    j_labels : list (column labels)
    """
    # index and column levels must be a partition of the index
    _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
    # From the sparse Series, get the integer indices and data for valid sparse
    # entries.
    sp_vals = ss.array.sp_values
    na_mask = notna(sp_vals)
    values = sp_vals[na_mask]
    # Dense positions of the non-NA stored values; used below to pick the
    # matching coordinates out of each axis' factorized codes.
    valid_ilocs = ss.array.sp_index.indices[na_mask]

    i_coords, i_labels = _levels_to_axis(
        ss, row_levels, valid_ilocs, sort_labels=sort_labels
    )

    j_coords, j_labels = _levels_to_axis(
        ss, column_levels, valid_ilocs, sort_labels=sort_labels
    )

    return values, i_coords, j_coords, i_labels, j_labels
140
+
141
+
142
def sparse_series_to_coo(
    ss: Series,
    row_levels: Iterable[int] = (0,),
    column_levels: Iterable[int] = (1,),
    sort_labels: bool = False,
) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:
    """
    Convert a sparse Series to a scipy.sparse.coo_matrix using index
    levels row_levels, column_levels as the row and column
    labels respectively. Returns the sparse_matrix, row and column labels.

    Parameters
    ----------
    ss : Series
        Sparse Series with a unique MultiIndex of at least two levels.
    row_levels, column_levels : iterable of level numbers or labels
        Levels mapped to matrix rows / columns; together they must form a
        partition of the MultiIndex levels.
    sort_labels : bool, default False
        Sort the row and column labels before forming the matrix.

    Raises
    ------
    ValueError
        If the index has fewer than two levels or contains duplicates.
    """
    import scipy.sparse

    if ss.index.nlevels < 2:
        raise ValueError("to_coo requires MultiIndex with nlevels >= 2.")
    if not ss.index.is_unique:
        raise ValueError(
            "Duplicate index entries are not allowed in to_coo transformation."
        )

    # to keep things simple, only rely on integer indexing (not labels)
    row_levels = [ss.index._get_level_number(x) for x in row_levels]
    column_levels = [ss.index._get_level_number(x) for x in column_levels]

    v, i, j, rows, columns = _to_ijv(
        ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
    )
    sparse_matrix = scipy.sparse.coo_matrix(
        (v, (i, j)), shape=(len(rows), len(columns))
    )
    return sparse_matrix, rows, columns
173
+
174
+
175
def coo_to_sparse_series(
    A: scipy.sparse.coo_matrix, dense_index: bool = False
) -> Series:
    """
    Convert a scipy.sparse.coo_matrix to a Series with type sparse.

    Parameters
    ----------
    A : scipy.sparse.coo_matrix
    dense_index : bool, default False
        If True, reindex onto the cartesian product of A.row and A.col.

    Returns
    -------
    Series

    Raises
    ------
    TypeError if A is not a coo_matrix
    """
    from pandas import SparseDtype

    try:
        # Accessing .data/.row/.col raises AttributeError for non-coo inputs,
        # which we surface as a TypeError below.
        idx = MultiIndex.from_arrays((A.row, A.col))
        ser = Series(A.data, idx, copy=False)
    except AttributeError as err:
        raise TypeError(
            f"Expected coo_matrix. Got {type(A).__name__} instead."
        ) from err

    ser = ser.sort_index().astype(SparseDtype(ser.dtype))
    if dense_index:
        ser = ser.reindex(MultiIndex.from_product([A.row, A.col]))
    return ser