ZTWHHH commited on
Commit
030455c
·
verified ·
1 Parent(s): 47e431d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/share/terminfo/w/wy350-vb +0 -0
  2. parrot/share/terminfo/w/wy520-24 +0 -0
  3. parrot/share/terminfo/w/wy520-36 +0 -0
  4. parrot/share/terminfo/w/wy520-36wpc +0 -0
  5. parrot/share/terminfo/w/wy60-25 +0 -0
  6. parrot/share/terminfo/w/wy60-vb +0 -0
  7. parrot/share/terminfo/w/wy75-vb +0 -0
  8. parrot/share/terminfo/w/wy85-w +0 -0
  9. parrot/share/terminfo/w/wyse50-w +0 -0
  10. parrot/share/terminfo/w/wyse520-36pc +0 -0
  11. parrot/share/terminfo/w/wyse60-43 +0 -0
  12. parrot/share/terminfo/w/wyse60-vb +0 -0
  13. parrot/share/terminfo/w/wyse75-vb +0 -0
  14. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc +0 -0
  15. videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc +0 -0
  16. videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py +0 -0
  17. videollama2/lib/python3.10/site-packages/pandas/core/dtypes/astype.py +301 -0
  18. videollama2/lib/python3.10/site-packages/pandas/core/dtypes/cast.py +1973 -0
  19. videollama2/lib/python3.10/site-packages/pandas/core/dtypes/missing.py +810 -0
  20. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__init__.py +15 -0
  21. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc +0 -0
  22. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc +0 -0
  23. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc +0 -0
  24. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc +0 -0
  25. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc +0 -0
  26. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc +0 -0
  27. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc +0 -0
  28. videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc +0 -0
  29. videollama2/lib/python3.10/site-packages/pandas/core/groupby/base.py +121 -0
  30. videollama2/lib/python3.10/site-packages/pandas/core/groupby/categorical.py +87 -0
  31. videollama2/lib/python3.10/site-packages/pandas/core/groupby/generic.py +2852 -0
  32. videollama2/lib/python3.10/site-packages/pandas/core/groupby/groupby.py +0 -0
  33. videollama2/lib/python3.10/site-packages/pandas/core/groupby/grouper.py +1102 -0
  34. videollama2/lib/python3.10/site-packages/pandas/core/groupby/indexing.py +304 -0
  35. videollama2/lib/python3.10/site-packages/pandas/core/groupby/numba_.py +181 -0
  36. videollama2/lib/python3.10/site-packages/pandas/core/groupby/ops.py +1208 -0
  37. videollama2/lib/python3.10/site-packages/pandas/core/indexers/__init__.py +31 -0
  38. videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc +0 -0
  39. videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc +0 -0
  40. videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc +0 -0
  41. videollama2/lib/python3.10/site-packages/pandas/core/indexers/objects.py +453 -0
  42. videollama2/lib/python3.10/site-packages/pandas/core/indexers/utils.py +553 -0
  43. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/read_stencil.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/sRGB_formats.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/blend_equation_advanced.py +23 -0
  46. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/coverage_sample.py +23 -0
  47. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/draw_buffers.py +26 -0
  48. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/explicit_attrib_location.py +23 -0
  49. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/framebuffer_multisample.py +23 -0
  50. vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/generate_mipmap_sRGB.py +23 -0
parrot/share/terminfo/w/wy350-vb ADDED
Binary file (1.49 kB). View file
 
parrot/share/terminfo/w/wy520-24 ADDED
Binary file (1.69 kB). View file
 
parrot/share/terminfo/w/wy520-36 ADDED
Binary file (1.69 kB). View file
 
parrot/share/terminfo/w/wy520-36wpc ADDED
Binary file (1.75 kB). View file
 
parrot/share/terminfo/w/wy60-25 ADDED
Binary file (1.57 kB). View file
 
parrot/share/terminfo/w/wy60-vb ADDED
Binary file (1.58 kB). View file
 
parrot/share/terminfo/w/wy75-vb ADDED
Binary file (1.68 kB). View file
 
parrot/share/terminfo/w/wy85-w ADDED
Binary file (1.68 kB). View file
 
parrot/share/terminfo/w/wyse50-w ADDED
Binary file (1.19 kB). View file
 
parrot/share/terminfo/w/wyse520-36pc ADDED
Binary file (1.73 kB). View file
 
parrot/share/terminfo/w/wyse60-43 ADDED
Binary file (1.57 kB). View file
 
parrot/share/terminfo/w/wyse60-vb ADDED
Binary file (1.58 kB). View file
 
parrot/share/terminfo/w/wyse75-vb ADDED
Binary file (1.68 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py ADDED
File without changes
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/astype.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for implementing 'astype' methods according to pandas conventions,
3
+ particularly ones that differ from numpy.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import inspect
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ overload,
11
+ )
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._libs import lib
17
+ from pandas._libs.tslibs.timedeltas import array_to_timedelta64
18
+ from pandas.errors import IntCastingNaNError
19
+
20
+ from pandas.core.dtypes.common import (
21
+ is_object_dtype,
22
+ is_string_dtype,
23
+ pandas_dtype,
24
+ )
25
+ from pandas.core.dtypes.dtypes import (
26
+ ExtensionDtype,
27
+ NumpyEADtype,
28
+ )
29
+
30
+ if TYPE_CHECKING:
31
+ from pandas._typing import (
32
+ ArrayLike,
33
+ DtypeObj,
34
+ IgnoreRaise,
35
+ )
36
+
37
+ from pandas.core.arrays import ExtensionArray
38
+
39
+ _dtype_obj = np.dtype(object)
40
+
41
+
42
+ @overload
43
+ def _astype_nansafe(
44
+ arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
45
+ ) -> np.ndarray:
46
+ ...
47
+
48
+
49
+ @overload
50
+ def _astype_nansafe(
51
+ arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
52
+ ) -> ExtensionArray:
53
+ ...
54
+
55
+
56
+ def _astype_nansafe(
57
+ arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
58
+ ) -> ArrayLike:
59
+ """
60
+ Cast the elements of an array to a given dtype a nan-safe manner.
61
+
62
+ Parameters
63
+ ----------
64
+ arr : ndarray
65
+ dtype : np.dtype or ExtensionDtype
66
+ copy : bool, default True
67
+ If False, a view will be attempted but may fail, if
68
+ e.g. the item sizes don't align.
69
+ skipna: bool, default False
70
+ Whether or not we should skip NaN when casting as a string-type.
71
+
72
+ Raises
73
+ ------
74
+ ValueError
75
+ The dtype was a datetime64/timedelta64 dtype, but it had no unit.
76
+ """
77
+
78
+ # dispatch on extension dtype if needed
79
+ if isinstance(dtype, ExtensionDtype):
80
+ return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
81
+
82
+ elif not isinstance(dtype, np.dtype): # pragma: no cover
83
+ raise ValueError("dtype must be np.dtype or ExtensionDtype")
84
+
85
+ if arr.dtype.kind in "mM":
86
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
87
+
88
+ arr = ensure_wrapped_if_datetimelike(arr)
89
+ res = arr.astype(dtype, copy=copy)
90
+ return np.asarray(res)
91
+
92
+ if issubclass(dtype.type, str):
93
+ shape = arr.shape
94
+ if arr.ndim > 1:
95
+ arr = arr.ravel()
96
+ return lib.ensure_string_array(
97
+ arr, skipna=skipna, convert_na_value=False
98
+ ).reshape(shape)
99
+
100
+ elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu":
101
+ return _astype_float_to_int_nansafe(arr, dtype, copy)
102
+
103
+ elif arr.dtype == object:
104
+ # if we have a datetime/timedelta array of objects
105
+ # then coerce to datetime64[ns] and use DatetimeArray.astype
106
+
107
+ if lib.is_np_dtype(dtype, "M"):
108
+ from pandas.core.arrays import DatetimeArray
109
+
110
+ dta = DatetimeArray._from_sequence(arr, dtype=dtype)
111
+ return dta._ndarray
112
+
113
+ elif lib.is_np_dtype(dtype, "m"):
114
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
115
+
116
+ # bc we know arr.dtype == object, this is equivalent to
117
+ # `np.asarray(to_timedelta(arr))`, but using a lower-level API that
118
+ # does not require a circular import.
119
+ tdvals = array_to_timedelta64(arr).view("m8[ns]")
120
+
121
+ tda = ensure_wrapped_if_datetimelike(tdvals)
122
+ return tda.astype(dtype, copy=False)._ndarray
123
+
124
+ if dtype.name in ("datetime64", "timedelta64"):
125
+ msg = (
126
+ f"The '{dtype.name}' dtype has no unit. Please pass in "
127
+ f"'{dtype.name}[ns]' instead."
128
+ )
129
+ raise ValueError(msg)
130
+
131
+ if copy or arr.dtype == object or dtype == object:
132
+ # Explicit copy, or required since NumPy can't view from / to object.
133
+ return arr.astype(dtype, copy=True)
134
+
135
+ return arr.astype(dtype, copy=copy)
136
+
137
+
138
+ def _astype_float_to_int_nansafe(
139
+ values: np.ndarray, dtype: np.dtype, copy: bool
140
+ ) -> np.ndarray:
141
+ """
142
+ astype with a check preventing converting NaN to an meaningless integer value.
143
+ """
144
+ if not np.isfinite(values).all():
145
+ raise IntCastingNaNError(
146
+ "Cannot convert non-finite values (NA or inf) to integer"
147
+ )
148
+ if dtype.kind == "u":
149
+ # GH#45151
150
+ if not (values >= 0).all():
151
+ raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")
152
+ with warnings.catch_warnings():
153
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
154
+ return values.astype(dtype, copy=copy)
155
+
156
+
157
+ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:
158
+ """
159
+ Cast array (ndarray or ExtensionArray) to the new dtype.
160
+
161
+ Parameters
162
+ ----------
163
+ values : ndarray or ExtensionArray
164
+ dtype : dtype object
165
+ copy : bool, default False
166
+ copy if indicated
167
+
168
+ Returns
169
+ -------
170
+ ndarray or ExtensionArray
171
+ """
172
+ if values.dtype == dtype:
173
+ if copy:
174
+ return values.copy()
175
+ return values
176
+
177
+ if not isinstance(values, np.ndarray):
178
+ # i.e. ExtensionArray
179
+ values = values.astype(dtype, copy=copy)
180
+
181
+ else:
182
+ values = _astype_nansafe(values, dtype, copy=copy)
183
+
184
+ # in pandas we don't store numpy str dtypes, so convert to object
185
+ if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):
186
+ values = np.array(values, dtype=object)
187
+
188
+ return values
189
+
190
+
191
+ def astype_array_safe(
192
+ values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise"
193
+ ) -> ArrayLike:
194
+ """
195
+ Cast array (ndarray or ExtensionArray) to the new dtype.
196
+
197
+ This basically is the implementation for DataFrame/Series.astype and
198
+ includes all custom logic for pandas (NaN-safety, converting str to object,
199
+ not allowing )
200
+
201
+ Parameters
202
+ ----------
203
+ values : ndarray or ExtensionArray
204
+ dtype : str, dtype convertible
205
+ copy : bool, default False
206
+ copy if indicated
207
+ errors : str, {'raise', 'ignore'}, default 'raise'
208
+ - ``raise`` : allow exceptions to be raised
209
+ - ``ignore`` : suppress exceptions. On error return original object
210
+
211
+ Returns
212
+ -------
213
+ ndarray or ExtensionArray
214
+ """
215
+ errors_legal_values = ("raise", "ignore")
216
+
217
+ if errors not in errors_legal_values:
218
+ invalid_arg = (
219
+ "Expected value of kwarg 'errors' to be one of "
220
+ f"{list(errors_legal_values)}. Supplied value is '{errors}'"
221
+ )
222
+ raise ValueError(invalid_arg)
223
+
224
+ if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
225
+ msg = (
226
+ f"Expected an instance of {dtype.__name__}, "
227
+ "but got the class instead. Try instantiating 'dtype'."
228
+ )
229
+ raise TypeError(msg)
230
+
231
+ dtype = pandas_dtype(dtype)
232
+ if isinstance(dtype, NumpyEADtype):
233
+ # Ensure we don't end up with a NumpyExtensionArray
234
+ dtype = dtype.numpy_dtype
235
+
236
+ try:
237
+ new_values = astype_array(values, dtype, copy=copy)
238
+ except (ValueError, TypeError):
239
+ # e.g. _astype_nansafe can fail on object-dtype of strings
240
+ # trying to convert to float
241
+ if errors == "ignore":
242
+ new_values = values
243
+ else:
244
+ raise
245
+
246
+ return new_values
247
+
248
+
249
+ def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool:
250
+ """Checks if astype avoided copying the data.
251
+
252
+ Parameters
253
+ ----------
254
+ dtype : Original dtype
255
+ new_dtype : target dtype
256
+
257
+ Returns
258
+ -------
259
+ True if new data is a view or not guaranteed to be a copy, False otherwise
260
+ """
261
+ if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype):
262
+ new_dtype, dtype = dtype, new_dtype
263
+
264
+ if dtype == new_dtype:
265
+ return True
266
+
267
+ elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype):
268
+ # Only equal numpy dtypes avoid a copy
269
+ return False
270
+
271
+ elif is_string_dtype(dtype) and is_string_dtype(new_dtype):
272
+ # Potentially! a view when converting from object to string
273
+ return True
274
+
275
+ elif is_object_dtype(dtype) and new_dtype.kind == "O":
276
+ # When the underlying array has dtype object, we don't have to make a copy
277
+ return True
278
+
279
+ elif dtype.kind in "mM" and new_dtype.kind in "mM":
280
+ dtype = getattr(dtype, "numpy_dtype", dtype)
281
+ new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype)
282
+ return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None)
283
+
284
+ numpy_dtype = getattr(dtype, "numpy_dtype", None)
285
+ new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None)
286
+
287
+ if numpy_dtype is None and isinstance(dtype, np.dtype):
288
+ numpy_dtype = dtype
289
+
290
+ if new_numpy_dtype is None and isinstance(new_dtype, np.dtype):
291
+ new_numpy_dtype = new_dtype
292
+
293
+ if numpy_dtype is not None and new_numpy_dtype is not None:
294
+ # if both have NumPy dtype or one of them is a numpy dtype
295
+ # they are only a view when the numpy dtypes are equal, e.g.
296
+ # int64 -> Int64 or int64[pyarrow]
297
+ # int64 -> Int32 copies
298
+ return numpy_dtype == new_numpy_dtype
299
+
300
+ # Assume this is a view since we don't know for sure if a copy was made
301
+ return True
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/cast.py ADDED
@@ -0,0 +1,1973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Routines for casting.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import datetime as dt
8
+ import functools
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Literal,
13
+ TypeVar,
14
+ cast,
15
+ overload,
16
+ )
17
+ import warnings
18
+
19
+ import numpy as np
20
+
21
+ from pandas._config import using_pyarrow_string_dtype
22
+
23
+ from pandas._libs import (
24
+ Interval,
25
+ Period,
26
+ lib,
27
+ )
28
+ from pandas._libs.missing import (
29
+ NA,
30
+ NAType,
31
+ checknull,
32
+ )
33
+ from pandas._libs.tslibs import (
34
+ NaT,
35
+ OutOfBoundsDatetime,
36
+ OutOfBoundsTimedelta,
37
+ Timedelta,
38
+ Timestamp,
39
+ is_supported_dtype,
40
+ )
41
+ from pandas._libs.tslibs.timedeltas import array_to_timedelta64
42
+ from pandas.compat.numpy import np_version_gt2
43
+ from pandas.errors import (
44
+ IntCastingNaNError,
45
+ LossySetitemError,
46
+ )
47
+
48
+ from pandas.core.dtypes.common import (
49
+ ensure_int8,
50
+ ensure_int16,
51
+ ensure_int32,
52
+ ensure_int64,
53
+ ensure_object,
54
+ ensure_str,
55
+ is_bool,
56
+ is_complex,
57
+ is_float,
58
+ is_integer,
59
+ is_object_dtype,
60
+ is_scalar,
61
+ is_string_dtype,
62
+ pandas_dtype as pandas_dtype_func,
63
+ )
64
+ from pandas.core.dtypes.dtypes import (
65
+ ArrowDtype,
66
+ BaseMaskedDtype,
67
+ CategoricalDtype,
68
+ DatetimeTZDtype,
69
+ ExtensionDtype,
70
+ IntervalDtype,
71
+ PandasExtensionDtype,
72
+ PeriodDtype,
73
+ )
74
+ from pandas.core.dtypes.generic import (
75
+ ABCExtensionArray,
76
+ ABCIndex,
77
+ ABCSeries,
78
+ )
79
+ from pandas.core.dtypes.inference import is_list_like
80
+ from pandas.core.dtypes.missing import (
81
+ is_valid_na_for_dtype,
82
+ isna,
83
+ na_value_for_dtype,
84
+ notna,
85
+ )
86
+
87
+ from pandas.io._util import _arrow_dtype_mapping
88
+
89
+ if TYPE_CHECKING:
90
+ from collections.abc import (
91
+ Sequence,
92
+ Sized,
93
+ )
94
+
95
+ from pandas._typing import (
96
+ ArrayLike,
97
+ Dtype,
98
+ DtypeObj,
99
+ NumpyIndexT,
100
+ Scalar,
101
+ npt,
102
+ )
103
+
104
+ from pandas import Index
105
+ from pandas.core.arrays import (
106
+ Categorical,
107
+ DatetimeArray,
108
+ ExtensionArray,
109
+ IntervalArray,
110
+ PeriodArray,
111
+ TimedeltaArray,
112
+ )
113
+
114
+
115
+ _int8_max = np.iinfo(np.int8).max
116
+ _int16_max = np.iinfo(np.int16).max
117
+ _int32_max = np.iinfo(np.int32).max
118
+
119
+ _dtype_obj = np.dtype(object)
120
+
121
+ NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray)
122
+
123
+
124
+ def maybe_convert_platform(
125
+ values: list | tuple | range | np.ndarray | ExtensionArray,
126
+ ) -> ArrayLike:
127
+ """try to do platform conversion, allow ndarray or list here"""
128
+ arr: ArrayLike
129
+
130
+ if isinstance(values, (list, tuple, range)):
131
+ arr = construct_1d_object_array_from_listlike(values)
132
+ else:
133
+ # The caller is responsible for ensuring that we have np.ndarray
134
+ # or ExtensionArray here.
135
+ arr = values
136
+
137
+ if arr.dtype == _dtype_obj:
138
+ arr = cast(np.ndarray, arr)
139
+ arr = lib.maybe_convert_objects(arr)
140
+
141
+ return arr
142
+
143
+
144
+ def is_nested_object(obj) -> bool:
145
+ """
146
+ return a boolean if we have a nested object, e.g. a Series with 1 or
147
+ more Series elements
148
+
149
+ This may not be necessarily be performant.
150
+
151
+ """
152
+ return bool(
153
+ isinstance(obj, ABCSeries)
154
+ and is_object_dtype(obj.dtype)
155
+ and any(isinstance(v, ABCSeries) for v in obj._values)
156
+ )
157
+
158
+
159
+ def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:
160
+ """
161
+ Cast scalar to Timestamp or Timedelta if scalar is datetime-like
162
+ and dtype is not object.
163
+
164
+ Parameters
165
+ ----------
166
+ value : scalar
167
+ dtype : Dtype, optional
168
+
169
+ Returns
170
+ -------
171
+ scalar
172
+ """
173
+ if dtype == _dtype_obj:
174
+ pass
175
+ elif isinstance(value, (np.datetime64, dt.datetime)):
176
+ value = Timestamp(value)
177
+ elif isinstance(value, (np.timedelta64, dt.timedelta)):
178
+ value = Timedelta(value)
179
+
180
+ return value
181
+
182
+
183
+ def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:
184
+ """
185
+ If passed a scalar cast the scalar to a python native type.
186
+
187
+ Parameters
188
+ ----------
189
+ value : scalar or Series
190
+
191
+ Returns
192
+ -------
193
+ scalar or Series
194
+ """
195
+ if is_float(value):
196
+ value = float(value)
197
+ elif is_integer(value):
198
+ value = int(value)
199
+ elif is_bool(value):
200
+ value = bool(value)
201
+ elif isinstance(value, (np.datetime64, np.timedelta64)):
202
+ value = maybe_box_datetimelike(value)
203
+ elif value is NA:
204
+ value = None
205
+ return value
206
+
207
+
208
+ def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
209
+ """
210
+ Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
211
+ into a numpy array. Failing to unbox would risk dropping nanoseconds.
212
+
213
+ Notes
214
+ -----
215
+ Caller is responsible for checking dtype.kind in "mM"
216
+ """
217
+ if is_valid_na_for_dtype(value, dtype):
218
+ # GH#36541: can't fill array directly with pd.NaT
219
+ # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)
220
+ # ValueError: cannot convert float NaN to integer
221
+ value = dtype.type("NaT", "ns")
222
+ elif isinstance(value, Timestamp):
223
+ if value.tz is None:
224
+ value = value.to_datetime64()
225
+ elif not isinstance(dtype, DatetimeTZDtype):
226
+ raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype")
227
+ elif isinstance(value, Timedelta):
228
+ value = value.to_timedelta64()
229
+
230
+ _disallow_mismatched_datetimelike(value, dtype)
231
+ return value
232
+
233
+
234
+ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
235
+ """
236
+ numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
237
+ vice-versa, but we do not want to allow this, so we need to
238
+ check explicitly
239
+ """
240
+ vdtype = getattr(value, "dtype", None)
241
+ if vdtype is None:
242
+ return
243
+ elif (vdtype.kind == "m" and dtype.kind == "M") or (
244
+ vdtype.kind == "M" and dtype.kind == "m"
245
+ ):
246
+ raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
247
+
248
+
249
+ @overload
250
+ def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
251
+ ...
252
+
253
+
254
+ @overload
255
+ def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
256
+ ...
257
+
258
+
259
+ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
260
+ """
261
+ try to cast to the specified dtype (e.g. convert back to bool/int
262
+ or could be an astype of float64->float32
263
+ """
264
+ if isinstance(result, ABCSeries):
265
+ result = result._values
266
+ do_round = False
267
+
268
+ if isinstance(dtype, str):
269
+ if dtype == "infer":
270
+ inferred_type = lib.infer_dtype(result, skipna=False)
271
+ if inferred_type == "boolean":
272
+ dtype = "bool"
273
+ elif inferred_type == "integer":
274
+ dtype = "int64"
275
+ elif inferred_type == "datetime64":
276
+ dtype = "datetime64[ns]"
277
+ elif inferred_type in ["timedelta", "timedelta64"]:
278
+ dtype = "timedelta64[ns]"
279
+
280
+ # try to upcast here
281
+ elif inferred_type == "floating":
282
+ dtype = "int64"
283
+ if issubclass(result.dtype.type, np.number):
284
+ do_round = True
285
+
286
+ else:
287
+ # TODO: complex? what if result is already non-object?
288
+ dtype = "object"
289
+
290
+ dtype = np.dtype(dtype)
291
+
292
+ if not isinstance(dtype, np.dtype):
293
+ # enforce our signature annotation
294
+ raise TypeError(dtype) # pragma: no cover
295
+
296
+ converted = maybe_downcast_numeric(result, dtype, do_round)
297
+ if converted is not result:
298
+ return converted
299
+
300
+ # a datetimelike
301
+ # GH12821, iNaT is cast to float
302
+ if dtype.kind in "mM" and result.dtype.kind in "if":
303
+ result = result.astype(dtype)
304
+
305
+ elif dtype.kind == "m" and result.dtype == _dtype_obj:
306
+ # test_where_downcast_to_td64
307
+ result = cast(np.ndarray, result)
308
+ result = array_to_timedelta64(result)
309
+
310
+ elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj:
311
+ result = cast(np.ndarray, result)
312
+ return np.asarray(maybe_cast_to_datetime(result, dtype=dtype))
313
+
314
+ return result
315
+
316
+
317
+ @overload
318
+ def maybe_downcast_numeric(
319
+ result: np.ndarray, dtype: np.dtype, do_round: bool = False
320
+ ) -> np.ndarray:
321
+ ...
322
+
323
+
324
+ @overload
325
+ def maybe_downcast_numeric(
326
+ result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
327
+ ) -> ArrayLike:
328
+ ...
329
+
330
+
331
+ def maybe_downcast_numeric(
332
+ result: ArrayLike, dtype: DtypeObj, do_round: bool = False
333
+ ) -> ArrayLike:
334
+ """
335
+ Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
336
+
337
+ Parameters
338
+ ----------
339
+ result : ndarray or ExtensionArray
340
+ dtype : np.dtype or ExtensionDtype
341
+ do_round : bool
342
+
343
+ Returns
344
+ -------
345
+ ndarray or ExtensionArray
346
+ """
347
+ if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):
348
+ # e.g. SparseDtype has no itemsize attr
349
+ return result
350
+
351
+ def trans(x):
352
+ if do_round:
353
+ return x.round()
354
+ return x
355
+
356
+ if dtype.kind == result.dtype.kind:
357
+ # don't allow upcasts here (except if empty)
358
+ if result.dtype.itemsize <= dtype.itemsize and result.size:
359
+ return result
360
+
361
+ if dtype.kind in "biu":
362
+ if not result.size:
363
+ # if we don't have any elements, just astype it
364
+ return trans(result).astype(dtype)
365
+
366
+ if isinstance(result, np.ndarray):
367
+ element = result.item(0)
368
+ else:
369
+ element = result.iloc[0]
370
+ if not isinstance(element, (np.integer, np.floating, int, float, bool)):
371
+ # a comparable, e.g. a Decimal may slip in here
372
+ return result
373
+
374
+ if (
375
+ issubclass(result.dtype.type, (np.object_, np.number))
376
+ and notna(result).all()
377
+ ):
378
+ new_result = trans(result).astype(dtype)
379
+ if new_result.dtype.kind == "O" or result.dtype.kind == "O":
380
+ # np.allclose may raise TypeError on object-dtype
381
+ if (new_result == result).all():
382
+ return new_result
383
+ else:
384
+ if np.allclose(new_result, result, rtol=0):
385
+ return new_result
386
+
387
+ elif (
388
+ issubclass(dtype.type, np.floating)
389
+ and result.dtype.kind != "b"
390
+ and not is_string_dtype(result.dtype)
391
+ ):
392
+ with warnings.catch_warnings():
393
+ warnings.filterwarnings(
394
+ "ignore", "overflow encountered in cast", RuntimeWarning
395
+ )
396
+ new_result = result.astype(dtype)
397
+
398
+ # Adjust tolerances based on floating point size
399
+ size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}
400
+
401
+ atol = size_tols.get(new_result.dtype.itemsize, 0.0)
402
+
403
+ # Check downcast float values are still equal within 7 digits when
404
+ # converting from float64 to float32
405
+ if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol):
406
+ return new_result
407
+
408
+ elif dtype.kind == result.dtype.kind == "c":
409
+ new_result = result.astype(dtype)
410
+
411
+ if np.array_equal(new_result, result, equal_nan=True):
412
+ # TODO: use tolerance like we do for float?
413
+ return new_result
414
+
415
+ return result
416
+
417
+
418
def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:
    """
    Upcast int/uint/float arrays narrower than 64 bits to the 64-bit variant.

    Parameters
    ----------
    arr : ndarray or ExtensionArray

    Returns
    -------
    ndarray or ExtensionArray
        The input unchanged when it is already 64-bit (or not int/uint/float),
        otherwise a cast to int64/uint64/float64 matching the input's kind.
    """
    # map dtype kind -> 64-bit target; other kinds are left untouched
    targets = {"i": np.int64, "u": np.uint64, "f": np.float64}
    current = arr.dtype
    wanted = targets.get(current.kind)
    if wanted is None or current == wanted:
        return arr
    return arr.astype(wanted)
439
+
440
+
441
+ def maybe_cast_pointwise_result(
442
+ result: ArrayLike,
443
+ dtype: DtypeObj,
444
+ numeric_only: bool = False,
445
+ same_dtype: bool = True,
446
+ ) -> ArrayLike:
447
+ """
448
+ Try casting result of a pointwise operation back to the original dtype if
449
+ appropriate.
450
+
451
+ Parameters
452
+ ----------
453
+ result : array-like
454
+ Result to cast.
455
+ dtype : np.dtype or ExtensionDtype
456
+ Input Series from which result was calculated.
457
+ numeric_only : bool, default False
458
+ Whether to cast only numerics or datetimes as well.
459
+ same_dtype : bool, default True
460
+ Specify dtype when calling _from_sequence
461
+
462
+ Returns
463
+ -------
464
+ result : array-like
465
+ result maybe casted to the dtype.
466
+ """
467
+
468
+ if isinstance(dtype, ExtensionDtype):
469
+ cls = dtype.construct_array_type()
470
+ if same_dtype:
471
+ result = _maybe_cast_to_extension_array(cls, result, dtype=dtype)
472
+ else:
473
+ result = _maybe_cast_to_extension_array(cls, result)
474
+
475
+ elif (numeric_only and dtype.kind in "iufcb") or not numeric_only:
476
+ result = maybe_downcast_to_dtype(result, dtype)
477
+
478
+ return result
479
+
480
+
481
+ def _maybe_cast_to_extension_array(
482
+ cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None
483
+ ) -> ArrayLike:
484
+ """
485
+ Call to `_from_sequence` that returns the object unchanged on Exception.
486
+
487
+ Parameters
488
+ ----------
489
+ cls : class, subclass of ExtensionArray
490
+ obj : arraylike
491
+ Values to pass to cls._from_sequence
492
+ dtype : ExtensionDtype, optional
493
+
494
+ Returns
495
+ -------
496
+ ExtensionArray or obj
497
+ """
498
+ result: ArrayLike
499
+
500
+ if dtype is not None:
501
+ try:
502
+ result = cls._from_scalars(obj, dtype=dtype)
503
+ except (TypeError, ValueError):
504
+ return obj
505
+ return result
506
+
507
+ try:
508
+ result = cls._from_sequence(obj, dtype=dtype)
509
+ except Exception:
510
+ # We can't predict what downstream EA constructors may raise
511
+ result = obj
512
+ return result
513
+
514
+
515
@overload
def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:
    ...


@overload
def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:
    ...


def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
    """
    Return ``dtype`` if it can hold NA values, else the best match that can.
    """
    if isinstance(dtype, ExtensionDtype):
        if dtype._can_hold_na:
            return dtype
        if isinstance(dtype, IntervalDtype):
            # TODO(GH#45349): don't special-case IntervalDtype, allow
            # overriding instead of returning object below.
            return IntervalDtype(np.float64, closed=dtype.closed)
        return _dtype_obj

    # numpy dtypes: bool -> object, int/uint -> float64; anything else
    # (float, complex, datetime, object, ...) already supports NaN/NaT
    if dtype.kind == "b":
        return _dtype_obj
    if dtype.kind in "iu":
        return np.dtype(np.float64)
    return dtype
542
+
543
+
544
# Canonical singleton NA values keyed by type.  maybe_promote swaps any
# equivalent-but-distinct NaN/NaT object for these so that the lru_cache in
# _maybe_promote_cached is not defeated by cache misses on equal NA values.
_canonical_nans = {
    np.datetime64: np.datetime64("NaT", "ns"),
    np.timedelta64: np.timedelta64("NaT", "ns"),
    type(np.nan): np.nan,
}
549
+
550
+
551
def maybe_promote(dtype: np.dtype, fill_value=np.nan):
    """
    Find the minimal dtype that can hold both the given dtype and fill_value.

    Parameters
    ----------
    dtype : np.dtype
    fill_value : scalar, default np.nan

    Returns
    -------
    dtype
        Upcasted from dtype argument if necessary.
    fill_value
        Upcasted from fill_value argument if necessary.

    Raises
    ------
    ValueError
        If fill_value is a non-scalar and dtype is not object.
    """
    orig = fill_value
    # track whether the *original* fill_value was a NaT so we can restore a
    # non-nanosecond NaT after the cache normalizes it to "NaT[ns]" below
    orig_is_nat = False
    if checknull(fill_value):
        # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740
        # avoid cache misses with NaN/NaT values that are not singletons
        if fill_value is not NA:
            try:
                orig_is_nat = np.isnat(fill_value)
            except TypeError:
                pass

        fill_value = _canonical_nans.get(type(fill_value), fill_value)

    # for performance, we are using a cached version of the actual implementation
    # of the function in _maybe_promote. However, this doesn't always work (in case
    # of non-hashable arguments), so we fallback to the actual implementation if needed
    try:
        # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
        # "Type[Any]"; expected "Hashable"  [arg-type]
        dtype, fill_value = _maybe_promote_cached(
            dtype, fill_value, type(fill_value)  # type: ignore[arg-type]
        )
    except TypeError:
        # if fill_value is not hashable (required for caching)
        dtype, fill_value = _maybe_promote(dtype, fill_value)

    if (dtype == _dtype_obj and orig is not None) or (
        orig_is_nat and np.datetime_data(orig)[0] != "ns"
    ):
        # GH#51592,53497 restore our potentially non-canonical fill_value
        fill_value = orig
    return dtype, fill_value
604
+
605
+
606
@functools.lru_cache
def _maybe_promote_cached(dtype, fill_value, fill_value_type):
    # Cached wrapper around _maybe_promote below.
    # fill_value_type is an (unused) extra argument so it participates in the
    # cache key, differentiating otherwise-equal values such as 1 and True.
    return _maybe_promote(dtype, fill_value)
612
+
613
+
614
def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
    """
    Uncached implementation of maybe_promote: find the minimal dtype that can
    hold both ``dtype`` and ``fill_value``, returning ``(dtype, fill_value)``
    with both upcast as necessary.  Use ``maybe_promote`` for the cached
    version.
    """
    if not is_scalar(fill_value):
        # with object dtype there is nothing to promote, and the user can
        # pass pretty much any weird fill_value they like
        if dtype != object:
            raise ValueError("fill_value must be a scalar")
        dtype = _dtype_obj
        return dtype, fill_value

    if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM":
        # NA-compatible fill: widen the dtype if needed and use its native NA
        dtype = ensure_dtype_can_hold_na(dtype)
        fv = na_value_for_dtype(dtype)
        return dtype, fv

    elif isinstance(dtype, CategoricalDtype):
        if fill_value in dtype.categories or isna(fill_value):
            return dtype, fill_value
        else:
            # value outside the categories -> fall back to object
            return object, ensure_object(fill_value)

    elif isna(fill_value):
        dtype = _dtype_obj
        if fill_value is None:
            # but we retain e.g. pd.NA
            fill_value = np.nan
        return dtype, fill_value

    # returns tuple of (dtype, fill_value)
    if issubclass(dtype.type, np.datetime64):
        inferred, fv = infer_dtype_from_scalar(fill_value)
        if inferred == dtype:
            return dtype, fv

        from pandas.core.arrays import DatetimeArray

        # use an empty DatetimeArray's setitem validation to decide whether the
        # fill_value is representable as datetime64[ns]
        dta = DatetimeArray._from_sequence([], dtype="M8[ns]")
        try:
            fv = dta._validate_setitem_value(fill_value)
            return dta.dtype, fv
        except (ValueError, TypeError):
            return _dtype_obj, fill_value

    elif issubclass(dtype.type, np.timedelta64):
        inferred, fv = infer_dtype_from_scalar(fill_value)
        if inferred == dtype:
            return dtype, fv

        elif inferred.kind == "m":
            # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns]
            # see if we can losslessly cast it to our dtype
            unit = np.datetime_data(dtype)[0]
            try:
                td = Timedelta(fill_value).as_unit(unit, round_ok=False)
            except OutOfBoundsTimedelta:
                return _dtype_obj, fill_value
            else:
                return dtype, td.asm8

        return _dtype_obj, fill_value

    elif is_float(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, np.integer):
            dtype = np.dtype(np.float64)

        elif dtype.kind == "f":
            mst = np.min_scalar_type(fill_value)
            if mst > dtype:
                # e.g. mst is np.float64 and dtype is np.float32
                dtype = mst

        elif dtype.kind == "c":
            mst = np.min_scalar_type(fill_value)
            dtype = np.promote_types(dtype, mst)

    elif is_bool(fill_value):
        if not issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

    elif is_integer(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, np.integer):
            if not np_can_cast_scalar(fill_value, dtype):  # type: ignore[arg-type]
                # upcast to prevent overflow
                mst = np.min_scalar_type(fill_value)
                dtype = np.promote_types(dtype, mst)
                if dtype.kind == "f":
                    # Case where we disagree with numpy
                    dtype = np.dtype(np.object_)

    elif is_complex(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, (np.integer, np.floating)):
            mst = np.min_scalar_type(fill_value)
            dtype = np.promote_types(dtype, mst)

        elif dtype.kind == "c":
            mst = np.min_scalar_type(fill_value)
            if mst > dtype:
                # e.g. mst is np.complex128 and dtype is np.complex64
                dtype = mst

    else:
        dtype = np.dtype(np.object_)

    # in case we have a string that looked like a number
    if issubclass(dtype.type, (bytes, str)):
        dtype = np.dtype(np.object_)

    fill_value = _ensure_dtype_type(fill_value, dtype)
    return dtype, fill_value
735
+
736
+
737
+ def _ensure_dtype_type(value, dtype: np.dtype):
738
+ """
739
+ Ensure that the given value is an instance of the given dtype.
740
+
741
+ e.g. if out dtype is np.complex64_, we should have an instance of that
742
+ as opposed to a python complex object.
743
+
744
+ Parameters
745
+ ----------
746
+ value : object
747
+ dtype : np.dtype
748
+
749
+ Returns
750
+ -------
751
+ object
752
+ """
753
+ # Start with exceptions in which we do _not_ cast to numpy types
754
+
755
+ if dtype == _dtype_obj:
756
+ return value
757
+
758
+ # Note: before we get here we have already excluded isna(value)
759
+ return dtype.type(value)
760
+
761
+
762
def infer_dtype_from(val) -> tuple[DtypeObj, Any]:
    """
    Interpret the dtype from a scalar or array.

    Parameters
    ----------
    val : object
    """
    # dispatch to the list-like or scalar inference helper
    if is_list_like(val):
        return infer_dtype_from_array(val)
    return infer_dtype_from_scalar(val)
773
+
774
+
775
def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]:
    """
    Interpret the dtype from a scalar.

    Parameters
    ----------
    val : object

    Returns
    -------
    tuple (dtype, possibly-converted scalar)
    """
    dtype: DtypeObj = _dtype_obj

    # a 1-element ndarray
    if isinstance(val, np.ndarray):
        if val.ndim != 0:
            msg = "invalid ndarray passed to infer_dtype_from_scalar"
            raise ValueError(msg)

        dtype = val.dtype
        val = lib.item_from_zerodim(val)

    elif isinstance(val, str):
        # If we create an empty array using a string to infer
        # the dtype, NumPy will only allocate one character per entry
        # so this is kind of bad. Alternately we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!

        dtype = _dtype_obj
        if using_pyarrow_string_dtype():
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype(storage="pyarrow_numpy")

    elif isinstance(val, (np.datetime64, dt.datetime)):
        try:
            val = Timestamp(val)
        except OutOfBoundsDatetime:
            # out-of-bounds datetimes can only live in object dtype
            return _dtype_obj, val

        if val is NaT or val.tz is None:
            val = val.to_datetime64()
            dtype = val.dtype
            # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes
        else:
            # tz-aware -> extension dtype preserving unit and tz
            dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz)

    elif isinstance(val, (np.timedelta64, dt.timedelta)):
        try:
            val = Timedelta(val)
        except (OutOfBoundsTimedelta, OverflowError):
            dtype = _dtype_obj
        else:
            if val is NaT:
                val = np.timedelta64("NaT", "ns")
            else:
                val = val.asm8
            dtype = val.dtype

    elif is_bool(val):
        dtype = np.dtype(np.bool_)

    elif is_integer(val):
        if isinstance(val, np.integer):
            dtype = np.dtype(type(val))
        else:
            dtype = np.dtype(np.int64)

        try:
            np.array(val, dtype=dtype)
        except OverflowError:
            # e.g. Python ints larger than int64 -> let numpy choose
            dtype = np.array(val).dtype

    elif is_float(val):
        if isinstance(val, np.floating):
            dtype = np.dtype(type(val))
        else:
            dtype = np.dtype(np.float64)

    elif is_complex(val):
        dtype = np.dtype(np.complex128)

    if isinstance(val, Period):
        dtype = PeriodDtype(freq=val.freq)
    elif isinstance(val, Interval):
        subtype = infer_dtype_from_scalar(val.left)[0]
        dtype = IntervalDtype(subtype=subtype, closed=val.closed)

    return dtype, val
862
+
863
+
864
def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:
    """
    Convert datetimelike-keyed dicts to a Timestamp-keyed dict.

    Parameters
    ----------
    d: dict-like object

    Returns
    -------
    dict
    """
    converted = {}
    for key, value in d.items():
        converted[maybe_box_datetimelike(key)] = value
    return converted
877
+
878
+
879
def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]:
    """
    Infer the dtype from an array.

    Parameters
    ----------
    arr : array

    Returns
    -------
    tuple (pandas-compat dtype, array)


    Examples
    --------
    >>> np.asarray([1, '1'])
    array(['1', '1'], dtype='<U21')

    >>> infer_dtype_from_array([1, '1'])
    (dtype('O'), [1, '1'])
    """
    if isinstance(arr, np.ndarray):
        return arr.dtype, arr

    if not is_list_like(arr):
        raise TypeError("'arr' must be list-like")

    if isinstance(getattr(arr, "dtype", None), ExtensionDtype):
        return arr.dtype, arr
    if isinstance(arr, ABCSeries):
        return arr.dtype, np.asarray(arr)

    # don't force numpy coerce with nan's
    kind = lib.infer_dtype(arr, skipna=False)
    if kind in ("string", "bytes", "mixed", "mixed-integer"):
        # coercing via np.asarray would mangle these -> keep the original
        return np.dtype(np.object_), arr

    coerced = np.asarray(arr)
    return coerced.dtype, coerced
920
+
921
+
922
+ def _maybe_infer_dtype_type(element):
923
+ """
924
+ Try to infer an object's dtype, for use in arithmetic ops.
925
+
926
+ Uses `element.dtype` if that's available.
927
+ Objects implementing the iterator protocol are cast to a NumPy array,
928
+ and from there the array's type is used.
929
+
930
+ Parameters
931
+ ----------
932
+ element : object
933
+ Possibly has a `.dtype` attribute, and possibly the iterator
934
+ protocol.
935
+
936
+ Returns
937
+ -------
938
+ tipo : type
939
+
940
+ Examples
941
+ --------
942
+ >>> from collections import namedtuple
943
+ >>> Foo = namedtuple("Foo", "dtype")
944
+ >>> _maybe_infer_dtype_type(Foo(np.dtype("i8")))
945
+ dtype('int64')
946
+ """
947
+ tipo = None
948
+ if hasattr(element, "dtype"):
949
+ tipo = element.dtype
950
+ elif is_list_like(element):
951
+ element = np.asarray(element)
952
+ tipo = element.dtype
953
+ return tipo
954
+
955
+
956
def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None:
    """
    Change string like dtypes to object for
    ``DataFrame.select_dtypes()``.
    """
    # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected
    # "Union[dtype[Any], ExtensionDtype, None]"
    # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected
    # "Union[dtype[Any], ExtensionDtype, None]"
    string_types = {
        np.dtype("S").type,  # type: ignore[arg-type]
        np.dtype("<U").type,  # type: ignore[arg-type]
    }
    # raise if any string-like dtype was requested
    if dtype_set & string_types:
        raise TypeError("string dtypes are not allowed, use 'object' instead")
971
+
972
+
973
def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
    """coerce the indexer input array to the smallest dtype possible"""
    n_categories = len(categories)
    # pick the narrowest signed integer type that can encode every category code
    for bound, coerce in (
        (_int8_max, ensure_int8),
        (_int16_max, ensure_int16),
        (_int32_max, ensure_int32),
    ):
        if n_categories < bound:
            return coerce(indexer)
    return ensure_int64(indexer)
983
+
984
+
985
def convert_dtypes(
    input_array: ArrayLike,
    convert_string: bool = True,
    convert_integer: bool = True,
    convert_boolean: bool = True,
    convert_floating: bool = True,
    infer_objects: bool = False,
    dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable",
) -> DtypeObj:
    """
    Convert objects to best possible type, and optionally,
    to types supporting ``pd.NA``.

    Parameters
    ----------
    input_array : ExtensionArray or np.ndarray
    convert_string : bool, default True
        Whether object dtypes should be converted to ``StringDtype()``.
    convert_integer : bool, default True
        Whether, if possible, conversion can be done to integer extension types.
    convert_boolean : bool, defaults True
        Whether object dtypes should be converted to ``BooleanDtypes()``.
    convert_floating : bool, defaults True
        Whether, if possible, conversion can be done to floating extension types.
        If `convert_integer` is also True, preference will be give to integer
        dtypes if the floats can be faithfully casted to integers.
    infer_objects : bool, defaults False
        Whether to also infer objects to float/int if possible. Is only hit if the
        object array contains pd.NA.
    dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). Behaviour is as follows:

        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
          (default).
        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
          DataFrame.

        .. versionadded:: 2.0

    Returns
    -------
    np.dtype, or ExtensionDtype
    """
    inferred_dtype: str | DtypeObj

    if (
        convert_string or convert_integer or convert_boolean or convert_floating
    ) and isinstance(input_array, np.ndarray):
        # For object arrays, infer a type-name string; otherwise start from the
        # array's own dtype.  The sections below may replace inferred_dtype with
        # a nullable extension dtype.
        if input_array.dtype == object:
            inferred_dtype = lib.infer_dtype(input_array)
        else:
            inferred_dtype = input_array.dtype

        if is_string_dtype(inferred_dtype):
            if not convert_string or inferred_dtype == "bytes":
                inferred_dtype = input_array.dtype
            else:
                inferred_dtype = pandas_dtype_func("string")

        if convert_integer:
            target_int_dtype = pandas_dtype_func("Int64")

            if input_array.dtype.kind in "iu":
                from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

                inferred_dtype = NUMPY_INT_TO_DTYPE.get(
                    input_array.dtype, target_int_dtype
                )
            elif input_array.dtype.kind in "fcb":
                # TODO: de-dup with maybe_cast_to_integer_array?
                arr = input_array[notna(input_array)]
                if (arr.astype(int) == arr).all():
                    inferred_dtype = target_int_dtype
                else:
                    inferred_dtype = input_array.dtype
            elif (
                infer_objects
                and input_array.dtype == object
                and (isinstance(inferred_dtype, str) and inferred_dtype == "integer")
            ):
                inferred_dtype = target_int_dtype

        if convert_floating:
            if input_array.dtype.kind in "fcb":
                # i.e. numeric but not integer
                from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

                inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get(
                    input_array.dtype, pandas_dtype_func("Float64")
                )
                # if we could also convert to integer, check if all floats
                # are actually integers
                if convert_integer:
                    # TODO: de-dup with maybe_cast_to_integer_array?
                    arr = input_array[notna(input_array)]
                    if (arr.astype(int) == arr).all():
                        inferred_dtype = pandas_dtype_func("Int64")
                    else:
                        inferred_dtype = inferred_float_dtype
                else:
                    inferred_dtype = inferred_float_dtype
            elif (
                infer_objects
                and input_array.dtype == object
                and (
                    isinstance(inferred_dtype, str)
                    and inferred_dtype == "mixed-integer-float"
                )
            ):
                inferred_dtype = pandas_dtype_func("Float64")

        if convert_boolean:
            if input_array.dtype.kind == "b":
                inferred_dtype = pandas_dtype_func("boolean")
            elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
                inferred_dtype = pandas_dtype_func("boolean")

        if isinstance(inferred_dtype, str):
            # If we couldn't do anything else, then we retain the dtype
            inferred_dtype = input_array.dtype

    else:
        inferred_dtype = input_array.dtype

    if dtype_backend == "pyarrow":
        from pandas.core.arrays.arrow.array import to_pyarrow_type
        from pandas.core.arrays.string_ import StringDtype

        assert not isinstance(inferred_dtype, str)

        if (
            (convert_integer and inferred_dtype.kind in "iu")
            or (convert_floating and inferred_dtype.kind in "fc")
            or (convert_boolean and inferred_dtype.kind == "b")
            or (convert_string and isinstance(inferred_dtype, StringDtype))
            or (
                inferred_dtype.kind not in "iufcb"
                and not isinstance(inferred_dtype, StringDtype)
            )
        ):
            # map the (possibly extension) dtype down to the numpy dtype that
            # to_pyarrow_type understands
            if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance(
                inferred_dtype, DatetimeTZDtype
            ):
                base_dtype = inferred_dtype.base
            elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)):
                base_dtype = inferred_dtype.numpy_dtype
            elif isinstance(inferred_dtype, StringDtype):
                base_dtype = np.dtype(str)
            else:
                base_dtype = inferred_dtype
            if (
                base_dtype.kind == "O"  # type: ignore[union-attr]
                and input_array.size > 0
                and isna(input_array).all()
            ):
                # all-NA object input maps to pyarrow's null type
                import pyarrow as pa

                pa_type = pa.null()
            else:
                pa_type = to_pyarrow_type(base_dtype)
            if pa_type is not None:
                inferred_dtype = ArrowDtype(pa_type)
    elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype):
        # GH 53648
        inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype]

    # error: Incompatible return value type (got "Union[str, Union[dtype[Any],
    # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]")
    return inferred_dtype  # type: ignore[return-value]
1155
+
1156
+
1157
+ def maybe_infer_to_datetimelike(
1158
+ value: npt.NDArray[np.object_],
1159
+ ) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:
1160
+ """
1161
+ we might have a array (or single object) that is datetime like,
1162
+ and no dtype is passed don't change the value unless we find a
1163
+ datetime/timedelta set
1164
+
1165
+ this is pretty strict in that a datetime/timedelta is REQUIRED
1166
+ in addition to possible nulls/string likes
1167
+
1168
+ Parameters
1169
+ ----------
1170
+ value : np.ndarray[object]
1171
+
1172
+ Returns
1173
+ -------
1174
+ np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray
1175
+
1176
+ """
1177
+ if not isinstance(value, np.ndarray) or value.dtype != object:
1178
+ # Caller is responsible for passing only ndarray[object]
1179
+ raise TypeError(type(value)) # pragma: no cover
1180
+ if value.ndim != 1:
1181
+ # Caller is responsible
1182
+ raise ValueError(value.ndim) # pragma: no cover
1183
+
1184
+ if not len(value):
1185
+ return value
1186
+
1187
+ # error: Incompatible return value type (got "Union[ExtensionArray,
1188
+ # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray,
1189
+ # TimedeltaArray, PeriodArray, IntervalArray]")
1190
+ return lib.maybe_convert_objects( # type: ignore[return-value]
1191
+ value,
1192
+ # Here we do not convert numeric dtypes, as if we wanted that,
1193
+ # numpy would have done it for us.
1194
+ convert_numeric=False,
1195
+ convert_non_numeric=True,
1196
+ dtype_if_all_nat=np.dtype("M8[ns]"),
1197
+ )
1198
+
1199
+
1200
+ def maybe_cast_to_datetime(
1201
+ value: np.ndarray | list, dtype: np.dtype
1202
+ ) -> ExtensionArray | np.ndarray:
1203
+ """
1204
+ try to cast the array/value to a datetimelike dtype, converting float
1205
+ nan to iNaT
1206
+
1207
+ Caller is responsible for handling ExtensionDtype cases and non dt64/td64
1208
+ cases.
1209
+ """
1210
+ from pandas.core.arrays.datetimes import DatetimeArray
1211
+ from pandas.core.arrays.timedeltas import TimedeltaArray
1212
+
1213
+ assert dtype.kind in "mM"
1214
+ if not is_list_like(value):
1215
+ raise TypeError("value must be listlike")
1216
+
1217
+ # TODO: _from_sequence would raise ValueError in cases where
1218
+ # _ensure_nanosecond_dtype raises TypeError
1219
+ _ensure_nanosecond_dtype(dtype)
1220
+
1221
+ if lib.is_np_dtype(dtype, "m"):
1222
+ res = TimedeltaArray._from_sequence(value, dtype=dtype)
1223
+ return res
1224
+ else:
1225
+ try:
1226
+ dta = DatetimeArray._from_sequence(value, dtype=dtype)
1227
+ except ValueError as err:
1228
+ # We can give a Series-specific exception message.
1229
+ if "cannot supply both a tz and a timezone-naive dtype" in str(err):
1230
+ raise ValueError(
1231
+ "Cannot convert timezone-aware data to "
1232
+ "timezone-naive dtype. Use "
1233
+ "pd.Series(values).dt.tz_localize(None) instead."
1234
+ ) from err
1235
+ raise
1236
+
1237
+ return dta
1238
+
1239
+
1240
def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None:
    """
    Raise for datetime64/timedelta64 dtypes whose resolution is unsupported
    (anything other than 's', 'ms', 'us', 'ns') or missing entirely.

    >>> _ensure_nanosecond_dtype(np.dtype("M8[us]"))

    >>> _ensure_nanosecond_dtype(np.dtype("M8[D]"))
    Traceback (most recent call last):
        ...
    TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'

    >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]"))
    Traceback (most recent call last):
        ...
    TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'
    """  # noqa: E501
    msg = (
        f"The '{dtype.name}' dtype has no unit. "
        f"Please pass in '{dtype.name}[ns]' instead."
    )

    # unpack e.g. SparseDtype
    dtype = getattr(dtype, "subtype", dtype)

    if not isinstance(dtype, np.dtype):
        # i.e. datetime64tz
        pass

    elif dtype.kind in "mM":
        if not is_supported_dtype(dtype):
            # pre-2.0 we would silently swap in nanos for lower-resolutions,
            # raise for above-nano resolutions
            if dtype.name in ["datetime64", "timedelta64"]:
                # unit-less dtype -> distinct error telling the user to add [ns]
                raise ValueError(msg)
            # TODO: ValueError or TypeError? existing test
            #  test_constructor_generic_timestamp_bad_frequency expects TypeError
            raise TypeError(
                f"dtype={dtype} is not supported. Supported resolutions are 's', "
                "'ms', 'us', and 'ns'"
            )
1280
+
1281
+
1282
# TODO: other value-dependent functions to standardize here include
#  Index._find_common_type_compat
def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:
    """
    Find the type/dtype for the result of an operation between objects.

    This is similar to find_common_type, but looks at the right object instead
    of just its dtype. This can be useful in particular when the right
    object does not have a `dtype`.

    Parameters
    ----------
    left_dtype : np.dtype or ExtensionDtype
    right : Any

    Returns
    -------
    np.dtype or ExtensionDtype

    See also
    --------
    find_common_type
    numpy.result_type
    """
    new_dtype: DtypeObj

    if (
        isinstance(left_dtype, np.dtype)
        and left_dtype.kind in "iuc"
        and (lib.is_integer(right) or lib.is_float(right))
    ):
        # e.g. with int8 dtype and right=512, we want to end up with
        # np.int16, whereas infer_dtype_from(512) gives np.int64,
        # which will make us upcast too far.
        if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f":
            right = int(right)
        # After NEP 50, numpy won't inspect Python scalars
        # TODO: do we need to recreate numpy's inspection logic for floats too
        # (this breaks some tests)
        if isinstance(right, int) and not isinstance(right, np.integer):
            # This gives an unsigned type by default
            # (if our number is positive)

            # If our left dtype is signed, we might not want this since
            # this might give us 1 dtype too big
            # We should check if the corresponding int dtype (e.g. int64 for uint64)
            # can hold the number
            right_dtype = np.min_scalar_type(right)
            if right == 0:
                # Special case 0
                right = left_dtype
            elif (
                not np.issubdtype(left_dtype, np.unsignedinteger)
                and 0 < right <= np.iinfo(right_dtype).max
            ):
                # If left dtype isn't unsigned, check if it fits in the signed dtype
                # NOTE(review): right_dtype here is unsigned (min_scalar_type of a
                # positive int), so e.g. right=200 gives uint8 with max 255 and is
                # mapped to "i1" even though 200 exceeds int8's max — confirm the
                # intended bound (a signed-max bound would be
                # 2 ** (8 * right_dtype.itemsize - 1) - 1)
                right = np.dtype(f"i{right_dtype.itemsize}")
            else:
                right = right_dtype

        new_dtype = np.result_type(left_dtype, right)

    elif is_valid_na_for_dtype(right, left_dtype):
        # e.g. IntervalDtype[int] and None/np.nan
        new_dtype = ensure_dtype_can_hold_na(left_dtype)

    else:
        dtype, _ = infer_dtype_from(right)
        new_dtype = find_common_type([left_dtype, dtype])

    return new_dtype
1353
+
1354
+
1355
def common_dtype_categorical_compat(
    objs: Sequence[Index | ArrayLike], dtype: DtypeObj
) -> DtypeObj:
    """
    Update the result of find_common_type to account for NAs in a Categorical.

    Parameters
    ----------
    objs : list[np.ndarray | ExtensionArray | Index]
    dtype : np.dtype or ExtensionDtype

    Returns
    -------
    np.dtype or ExtensionDtype
    """
    # GH#38240

    # TODO: more generally, could do `not can_hold_na(dtype)`
    if lib.is_np_dtype(dtype, "iu"):
        for obj in objs:
            # We don't want to accidentally allow e.g. "categorical" str here
            obj_dtype = getattr(obj, "dtype", None)
            if isinstance(obj_dtype, CategoricalDtype):
                if isinstance(obj, ABCIndex):
                    # This check may already be cached
                    hasnas = obj.hasnans
                else:
                    # Categorical
                    hasnas = cast("Categorical", obj)._hasna

                if hasnas:
                    # see test_union_int_categorical_with_nan
                    # integer dtype cannot hold the NA -> widen to float64
                    dtype = np.dtype(np.float64)
                    break
    return dtype
1390
+
1391
+
1392
def np_find_common_type(*dtypes: np.dtype) -> np.dtype:
    """
    np.find_common_type implementation pre-1.25 deprecation using np.result_type
    https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065

    Parameters
    ----------
    dtypes : np.dtypes

    Returns
    -------
    np.dtype
    """
    try:
        common = np.result_type(*dtypes)
    except TypeError:
        # result_type refuses some combinations entirely -> object
        return np.dtype("O")

    if common.kind in "mMSU":
        # NumPy promotion currently (1.25) misbehaves for times and strings,
        # so fall back to object (find_common_dtype did unless there
        # was only one dtype)
        return np.dtype("O")
    return common
1416
+
1417
+
1418
+ @overload
1419
+ def find_common_type(types: list[np.dtype]) -> np.dtype:
1420
+ ...
1421
+
1422
+
1423
+ @overload
1424
+ def find_common_type(types: list[ExtensionDtype]) -> DtypeObj:
1425
+ ...
1426
+
1427
+
1428
+ @overload
1429
+ def find_common_type(types: list[DtypeObj]) -> DtypeObj:
1430
+ ...
1431
+
1432
+
1433
+ def find_common_type(types):
1434
+ """
1435
+ Find a common data type among the given dtypes.
1436
+
1437
+ Parameters
1438
+ ----------
1439
+ types : list of dtypes
1440
+
1441
+ Returns
1442
+ -------
1443
+ pandas extension or numpy dtype
1444
+
1445
+ See Also
1446
+ --------
1447
+ numpy.find_common_type
1448
+
1449
+ """
1450
+ if not types:
1451
+ raise ValueError("no types given")
1452
+
1453
+ first = types[0]
1454
+
1455
+ # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
1456
+ # => object
1457
+ if lib.dtypes_all_equal(list(types)):
1458
+ return first
1459
+
1460
+ # get unique types (dict.fromkeys is used as order-preserving set())
1461
+ types = list(dict.fromkeys(types).keys())
1462
+
1463
+ if any(isinstance(t, ExtensionDtype) for t in types):
1464
+ for t in types:
1465
+ if isinstance(t, ExtensionDtype):
1466
+ res = t._get_common_dtype(types)
1467
+ if res is not None:
1468
+ return res
1469
+ return np.dtype("object")
1470
+
1471
+ # take lowest unit
1472
+ if all(lib.is_np_dtype(t, "M") for t in types):
1473
+ return np.dtype(max(types))
1474
+ if all(lib.is_np_dtype(t, "m") for t in types):
1475
+ return np.dtype(max(types))
1476
+
1477
+ # don't mix bool / int or float or complex
1478
+ # this is different from numpy, which casts bool with float/int as int
1479
+ has_bools = any(t.kind == "b" for t in types)
1480
+ if has_bools:
1481
+ for t in types:
1482
+ if t.kind in "iufc":
1483
+ return np.dtype("object")
1484
+
1485
+ return np_find_common_type(*types)
1486
+
1487
+
1488
+ def construct_2d_arraylike_from_scalar(
1489
+ value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
1490
+ ) -> np.ndarray:
1491
+ shape = (length, width)
1492
+
1493
+ if dtype.kind in "mM":
1494
+ value = _maybe_box_and_unbox_datetimelike(value, dtype)
1495
+ elif dtype == _dtype_obj:
1496
+ if isinstance(value, (np.timedelta64, np.datetime64)):
1497
+ # calling np.array below would cast to pytimedelta/pydatetime
1498
+ out = np.empty(shape, dtype=object)
1499
+ out.fill(value)
1500
+ return out
1501
+
1502
+ # Attempt to coerce to a numpy array
1503
+ try:
1504
+ if not copy:
1505
+ arr = np.asarray(value, dtype=dtype)
1506
+ else:
1507
+ arr = np.array(value, dtype=dtype, copy=copy)
1508
+ except (ValueError, TypeError) as err:
1509
+ raise TypeError(
1510
+ f"DataFrame constructor called with incompatible data and dtype: {err}"
1511
+ ) from err
1512
+
1513
+ if arr.ndim != 0:
1514
+ raise ValueError("DataFrame constructor not properly called!")
1515
+
1516
+ return np.full(shape, arr)
1517
+
1518
+
1519
+ def construct_1d_arraylike_from_scalar(
1520
+ value: Scalar, length: int, dtype: DtypeObj | None
1521
+ ) -> ArrayLike:
1522
+ """
1523
+ create a np.ndarray / pandas type of specified shape and dtype
1524
+ filled with values
1525
+
1526
+ Parameters
1527
+ ----------
1528
+ value : scalar value
1529
+ length : int
1530
+ dtype : pandas_dtype or np.dtype
1531
+
1532
+ Returns
1533
+ -------
1534
+ np.ndarray / pandas type of length, filled with value
1535
+
1536
+ """
1537
+
1538
+ if dtype is None:
1539
+ try:
1540
+ dtype, value = infer_dtype_from_scalar(value)
1541
+ except OutOfBoundsDatetime:
1542
+ dtype = _dtype_obj
1543
+
1544
+ if isinstance(dtype, ExtensionDtype):
1545
+ cls = dtype.construct_array_type()
1546
+ seq = [] if length == 0 else [value]
1547
+ subarr = cls._from_sequence(seq, dtype=dtype).repeat(length)
1548
+
1549
+ else:
1550
+ if length and dtype.kind in "iu" and isna(value):
1551
+ # coerce if we have nan for an integer dtype
1552
+ dtype = np.dtype("float64")
1553
+ elif lib.is_np_dtype(dtype, "US"):
1554
+ # we need to coerce to object dtype to avoid
1555
+ # to allow numpy to take our string as a scalar value
1556
+ dtype = np.dtype("object")
1557
+ if not isna(value):
1558
+ value = ensure_str(value)
1559
+ elif dtype.kind in "mM":
1560
+ value = _maybe_box_and_unbox_datetimelike(value, dtype)
1561
+
1562
+ subarr = np.empty(length, dtype=dtype)
1563
+ if length:
1564
+ # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
1565
+ subarr.fill(value)
1566
+
1567
+ return subarr
1568
+
1569
+
1570
+ def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj):
1571
+ # Caller is responsible for checking dtype.kind in "mM"
1572
+
1573
+ if isinstance(value, dt.datetime):
1574
+ # we dont want to box dt64, in particular datetime64("NaT")
1575
+ value = maybe_box_datetimelike(value, dtype)
1576
+
1577
+ return _maybe_unbox_datetimelike(value, dtype)
1578
+
1579
+
1580
+ def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:
1581
+ """
1582
+ Transform any list-like object in a 1-dimensional numpy array of object
1583
+ dtype.
1584
+
1585
+ Parameters
1586
+ ----------
1587
+ values : any iterable which has a len()
1588
+
1589
+ Raises
1590
+ ------
1591
+ TypeError
1592
+ * If `values` does not have a len()
1593
+
1594
+ Returns
1595
+ -------
1596
+ 1-dimensional numpy array of dtype object
1597
+ """
1598
+ # numpy will try to interpret nested lists as further dimensions, hence
1599
+ # making a 1D array that contains list-likes is a bit tricky:
1600
+ result = np.empty(len(values), dtype="object")
1601
+ result[:] = values
1602
+ return result
1603
+
1604
+
1605
+ def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
1606
+ """
1607
+ Takes any dtype and returns the casted version, raising for when data is
1608
+ incompatible with integer/unsigned integer dtypes.
1609
+
1610
+ Parameters
1611
+ ----------
1612
+ arr : np.ndarray or list
1613
+ The array to cast.
1614
+ dtype : np.dtype
1615
+ The integer dtype to cast the array to.
1616
+
1617
+ Returns
1618
+ -------
1619
+ ndarray
1620
+ Array of integer or unsigned integer dtype.
1621
+
1622
+ Raises
1623
+ ------
1624
+ OverflowError : the dtype is incompatible with the data
1625
+ ValueError : loss of precision has occurred during casting
1626
+
1627
+ Examples
1628
+ --------
1629
+ If you try to coerce negative values to unsigned integers, it raises:
1630
+
1631
+ >>> pd.Series([-1], dtype="uint64")
1632
+ Traceback (most recent call last):
1633
+ ...
1634
+ OverflowError: Trying to coerce negative values to unsigned integers
1635
+
1636
+ Also, if you try to coerce float values to integers, it raises:
1637
+
1638
+ >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))
1639
+ Traceback (most recent call last):
1640
+ ...
1641
+ ValueError: Trying to coerce float values to integers
1642
+ """
1643
+ assert dtype.kind in "iu"
1644
+
1645
+ try:
1646
+ if not isinstance(arr, np.ndarray):
1647
+ with warnings.catch_warnings():
1648
+ # We already disallow dtype=uint w/ negative numbers
1649
+ # (test_constructor_coercion_signed_to_unsigned) so safe to ignore.
1650
+ if not np_version_gt2:
1651
+ warnings.filterwarnings(
1652
+ "ignore",
1653
+ "NumPy will stop allowing conversion of "
1654
+ "out-of-bound Python int",
1655
+ DeprecationWarning,
1656
+ )
1657
+ casted = np.asarray(arr, dtype=dtype)
1658
+ else:
1659
+ with warnings.catch_warnings():
1660
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
1661
+ casted = arr.astype(dtype, copy=False)
1662
+ except OverflowError as err:
1663
+ raise OverflowError(
1664
+ "The elements provided in the data cannot all be "
1665
+ f"casted to the dtype {dtype}"
1666
+ ) from err
1667
+
1668
+ if isinstance(arr, np.ndarray) and arr.dtype == dtype:
1669
+ # avoid expensive array_equal check
1670
+ return casted
1671
+
1672
+ with warnings.catch_warnings():
1673
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
1674
+ warnings.filterwarnings(
1675
+ "ignore", "elementwise comparison failed", FutureWarning
1676
+ )
1677
+ if np.array_equal(arr, casted):
1678
+ return casted
1679
+
1680
+ # We do this casting to allow for proper
1681
+ # data and dtype checking.
1682
+ #
1683
+ # We didn't do this earlier because NumPy
1684
+ # doesn't handle `uint64` correctly.
1685
+ arr = np.asarray(arr)
1686
+
1687
+ if np.issubdtype(arr.dtype, str):
1688
+ # TODO(numpy-2.0 min): This case will raise an OverflowError above
1689
+ if (casted.astype(str) == arr).all():
1690
+ return casted
1691
+ raise ValueError(f"string values cannot be losslessly cast to {dtype}")
1692
+
1693
+ if dtype.kind == "u" and (arr < 0).any():
1694
+ # TODO: can this be hit anymore after numpy 2.0?
1695
+ raise OverflowError("Trying to coerce negative values to unsigned integers")
1696
+
1697
+ if arr.dtype.kind == "f":
1698
+ if not np.isfinite(arr).all():
1699
+ raise IntCastingNaNError(
1700
+ "Cannot convert non-finite values (NA or inf) to integer"
1701
+ )
1702
+ raise ValueError("Trying to coerce float values to integers")
1703
+ if arr.dtype == object:
1704
+ raise ValueError("Trying to coerce float values to integers")
1705
+
1706
+ if casted.dtype < arr.dtype:
1707
+ # TODO: Can this path be hit anymore with numpy > 2
1708
+ # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows
1709
+ raise ValueError(
1710
+ f"Values are too large to be losslessly converted to {dtype}. "
1711
+ f"To cast anyway, use pd.Series(values).astype({dtype})"
1712
+ )
1713
+
1714
+ if arr.dtype.kind in "mM":
1715
+ # test_constructor_maskedarray_nonfloat
1716
+ raise TypeError(
1717
+ f"Constructing a Series or DataFrame from {arr.dtype} values and "
1718
+ f"dtype={dtype} is not supported. Use values.view({dtype}) instead."
1719
+ )
1720
+
1721
+ # No known cases that get here, but raising explicitly to cover our bases.
1722
+ raise ValueError(f"values cannot be losslessly cast to {dtype}")
1723
+
1724
+
1725
+ def can_hold_element(arr: ArrayLike, element: Any) -> bool:
1726
+ """
1727
+ Can we do an inplace setitem with this element in an array with this dtype?
1728
+
1729
+ Parameters
1730
+ ----------
1731
+ arr : np.ndarray or ExtensionArray
1732
+ element : Any
1733
+
1734
+ Returns
1735
+ -------
1736
+ bool
1737
+ """
1738
+ dtype = arr.dtype
1739
+ if not isinstance(dtype, np.dtype) or dtype.kind in "mM":
1740
+ if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):
1741
+ # np.dtype here catches datetime64ns and timedelta64ns; we assume
1742
+ # in this case that we have DatetimeArray/TimedeltaArray
1743
+ arr = cast(
1744
+ "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr
1745
+ )
1746
+ try:
1747
+ arr._validate_setitem_value(element)
1748
+ return True
1749
+ except (ValueError, TypeError):
1750
+ return False
1751
+
1752
+ # This is technically incorrect, but maintains the behavior of
1753
+ # ExtensionBlock._can_hold_element
1754
+ return True
1755
+
1756
+ try:
1757
+ np_can_hold_element(dtype, element)
1758
+ return True
1759
+ except (TypeError, LossySetitemError):
1760
+ return False
1761
+
1762
+
1763
+ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
1764
+ """
1765
+ Raise if we cannot losslessly set this element into an ndarray with this dtype.
1766
+
1767
+ Specifically about places where we disagree with numpy. i.e. there are
1768
+ cases where numpy will raise in doing the setitem that we do not check
1769
+ for here, e.g. setting str "X" into a numeric ndarray.
1770
+
1771
+ Returns
1772
+ -------
1773
+ Any
1774
+ The element, potentially cast to the dtype.
1775
+
1776
+ Raises
1777
+ ------
1778
+ ValueError : If we cannot losslessly store this element with this dtype.
1779
+ """
1780
+ if dtype == _dtype_obj:
1781
+ return element
1782
+
1783
+ tipo = _maybe_infer_dtype_type(element)
1784
+
1785
+ if dtype.kind in "iu":
1786
+ if isinstance(element, range):
1787
+ if _dtype_can_hold_range(element, dtype):
1788
+ return element
1789
+ raise LossySetitemError
1790
+
1791
+ if is_integer(element) or (is_float(element) and element.is_integer()):
1792
+ # e.g. test_setitem_series_int8 if we have a python int 1
1793
+ # tipo may be np.int32, despite the fact that it will fit
1794
+ # in smaller int dtypes.
1795
+ info = np.iinfo(dtype)
1796
+ if info.min <= element <= info.max:
1797
+ return dtype.type(element)
1798
+ raise LossySetitemError
1799
+
1800
+ if tipo is not None:
1801
+ if tipo.kind not in "iu":
1802
+ if isinstance(element, np.ndarray) and element.dtype.kind == "f":
1803
+ # If all can be losslessly cast to integers, then we can hold them
1804
+ with np.errstate(invalid="ignore"):
1805
+ # We check afterwards if cast was losslessly, so no need to show
1806
+ # the warning
1807
+ casted = element.astype(dtype)
1808
+ comp = casted == element
1809
+ if comp.all():
1810
+ # Return the casted values bc they can be passed to
1811
+ # np.putmask, whereas the raw values cannot.
1812
+ # see TestSetitemFloatNDarrayIntoIntegerSeries
1813
+ return casted
1814
+ raise LossySetitemError
1815
+
1816
+ elif isinstance(element, ABCExtensionArray) and isinstance(
1817
+ element.dtype, CategoricalDtype
1818
+ ):
1819
+ # GH#52927 setting Categorical value into non-EA frame
1820
+ # TODO: general-case for EAs?
1821
+ try:
1822
+ casted = element.astype(dtype)
1823
+ except (ValueError, TypeError):
1824
+ raise LossySetitemError
1825
+ # Check for cases of either
1826
+ # a) lossy overflow/rounding or
1827
+ # b) semantic changes like dt64->int64
1828
+ comp = casted == element
1829
+ if not comp.all():
1830
+ raise LossySetitemError
1831
+ return casted
1832
+
1833
+ # Anything other than integer we cannot hold
1834
+ raise LossySetitemError
1835
+ if (
1836
+ dtype.kind == "u"
1837
+ and isinstance(element, np.ndarray)
1838
+ and element.dtype.kind == "i"
1839
+ ):
1840
+ # see test_where_uint64
1841
+ casted = element.astype(dtype)
1842
+ if (casted == element).all():
1843
+ # TODO: faster to check (element >=0).all()? potential
1844
+ # itemsize issues there?
1845
+ return casted
1846
+ raise LossySetitemError
1847
+ if dtype.itemsize < tipo.itemsize:
1848
+ raise LossySetitemError
1849
+ if not isinstance(tipo, np.dtype):
1850
+ # i.e. nullable IntegerDtype; we can put this into an ndarray
1851
+ # losslessly iff it has no NAs
1852
+ arr = element._values if isinstance(element, ABCSeries) else element
1853
+ if arr._hasna:
1854
+ raise LossySetitemError
1855
+ return element
1856
+
1857
+ return element
1858
+
1859
+ raise LossySetitemError
1860
+
1861
+ if dtype.kind == "f":
1862
+ if lib.is_integer(element) or lib.is_float(element):
1863
+ casted = dtype.type(element)
1864
+ if np.isnan(casted) or casted == element:
1865
+ return casted
1866
+ # otherwise e.g. overflow see TestCoercionFloat32
1867
+ raise LossySetitemError
1868
+
1869
+ if tipo is not None:
1870
+ # TODO: itemsize check?
1871
+ if tipo.kind not in "iuf":
1872
+ # Anything other than float/integer we cannot hold
1873
+ raise LossySetitemError
1874
+ if not isinstance(tipo, np.dtype):
1875
+ # i.e. nullable IntegerDtype or FloatingDtype;
1876
+ # we can put this into an ndarray losslessly iff it has no NAs
1877
+ if element._hasna:
1878
+ raise LossySetitemError
1879
+ return element
1880
+ elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind:
1881
+ if isinstance(element, np.ndarray):
1882
+ # e.g. TestDataFrameIndexingWhere::test_where_alignment
1883
+ casted = element.astype(dtype)
1884
+ if np.array_equal(casted, element, equal_nan=True):
1885
+ return casted
1886
+ raise LossySetitemError
1887
+
1888
+ return element
1889
+
1890
+ raise LossySetitemError
1891
+
1892
+ if dtype.kind == "c":
1893
+ if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element):
1894
+ if np.isnan(element):
1895
+ # see test_where_complex GH#6345
1896
+ return dtype.type(element)
1897
+
1898
+ with warnings.catch_warnings():
1899
+ warnings.filterwarnings("ignore")
1900
+ casted = dtype.type(element)
1901
+ if casted == element:
1902
+ return casted
1903
+ # otherwise e.g. overflow see test_32878_complex_itemsize
1904
+ raise LossySetitemError
1905
+
1906
+ if tipo is not None:
1907
+ if tipo.kind in "iufc":
1908
+ return element
1909
+ raise LossySetitemError
1910
+ raise LossySetitemError
1911
+
1912
+ if dtype.kind == "b":
1913
+ if tipo is not None:
1914
+ if tipo.kind == "b":
1915
+ if not isinstance(tipo, np.dtype):
1916
+ # i.e. we have a BooleanArray
1917
+ if element._hasna:
1918
+ # i.e. there are pd.NA elements
1919
+ raise LossySetitemError
1920
+ return element
1921
+ raise LossySetitemError
1922
+ if lib.is_bool(element):
1923
+ return element
1924
+ raise LossySetitemError
1925
+
1926
+ if dtype.kind == "S":
1927
+ # TODO: test tests.frame.methods.test_replace tests get here,
1928
+ # need more targeted tests. xref phofl has a PR about this
1929
+ if tipo is not None:
1930
+ if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize:
1931
+ return element
1932
+ raise LossySetitemError
1933
+ if isinstance(element, bytes) and len(element) <= dtype.itemsize:
1934
+ return element
1935
+ raise LossySetitemError
1936
+
1937
+ if dtype.kind == "V":
1938
+ # i.e. np.void, which cannot hold _anything_
1939
+ raise LossySetitemError
1940
+
1941
+ raise NotImplementedError(dtype)
1942
+
1943
+
1944
+ def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:
1945
+ """
1946
+ _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),
1947
+ but in many cases a range can be held by a smaller integer dtype.
1948
+ Check if this is one of those cases.
1949
+ """
1950
+ if not len(rng):
1951
+ return True
1952
+ return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)
1953
+
1954
+
1955
+ def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool:
1956
+ """
1957
+ np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar
1958
+ inference
1959
+
1960
+ Parameters
1961
+ ----------
1962
+ element : Scalar
1963
+ dtype : np.dtype
1964
+
1965
+ Returns
1966
+ -------
1967
+ bool
1968
+ """
1969
+ try:
1970
+ np_can_hold_element(dtype, element)
1971
+ return True
1972
+ except (LossySetitemError, NotImplementedError):
1973
+ return False
videollama2/lib/python3.10/site-packages/pandas/core/dtypes/missing.py ADDED
@@ -0,0 +1,810 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ missing types & inference
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from decimal import Decimal
7
+ from functools import partial
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ overload,
11
+ )
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._config import get_option
17
+
18
+ from pandas._libs import lib
19
+ import pandas._libs.missing as libmissing
20
+ from pandas._libs.tslibs import (
21
+ NaT,
22
+ iNaT,
23
+ )
24
+
25
+ from pandas.core.dtypes.common import (
26
+ DT64NS_DTYPE,
27
+ TD64NS_DTYPE,
28
+ ensure_object,
29
+ is_scalar,
30
+ is_string_or_object_np_dtype,
31
+ )
32
+ from pandas.core.dtypes.dtypes import (
33
+ CategoricalDtype,
34
+ DatetimeTZDtype,
35
+ ExtensionDtype,
36
+ IntervalDtype,
37
+ PeriodDtype,
38
+ )
39
+ from pandas.core.dtypes.generic import (
40
+ ABCDataFrame,
41
+ ABCExtensionArray,
42
+ ABCIndex,
43
+ ABCMultiIndex,
44
+ ABCSeries,
45
+ )
46
+ from pandas.core.dtypes.inference import is_list_like
47
+
48
+ if TYPE_CHECKING:
49
+ from re import Pattern
50
+
51
+ from pandas._typing import (
52
+ ArrayLike,
53
+ DtypeObj,
54
+ NDFrame,
55
+ NDFrameT,
56
+ Scalar,
57
+ npt,
58
+ )
59
+
60
+ from pandas import Series
61
+ from pandas.core.indexes.base import Index
62
+
63
+
64
+ isposinf_scalar = libmissing.isposinf_scalar
65
+ isneginf_scalar = libmissing.isneginf_scalar
66
+
67
+ nan_checker = np.isnan
68
+ INF_AS_NA = False
69
+ _dtype_object = np.dtype("object")
70
+ _dtype_str = np.dtype(str)
71
+
72
+
73
+ @overload
74
+ def isna(obj: Scalar | Pattern) -> bool:
75
+ ...
76
+
77
+
78
+ @overload
79
+ def isna(
80
+ obj: ArrayLike | Index | list,
81
+ ) -> npt.NDArray[np.bool_]:
82
+ ...
83
+
84
+
85
+ @overload
86
+ def isna(obj: NDFrameT) -> NDFrameT:
87
+ ...
88
+
89
+
90
+ # handle unions
91
+ @overload
92
+ def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
93
+ ...
94
+
95
+
96
+ @overload
97
+ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
98
+ ...
99
+
100
+
101
+ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
102
+ """
103
+ Detect missing values for an array-like object.
104
+
105
+ This function takes a scalar or array-like object and indicates
106
+ whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
107
+ in object arrays, ``NaT`` in datetimelike).
108
+
109
+ Parameters
110
+ ----------
111
+ obj : scalar or array-like
112
+ Object to check for null or missing values.
113
+
114
+ Returns
115
+ -------
116
+ bool or array-like of bool
117
+ For scalar input, returns a scalar boolean.
118
+ For array input, returns an array of boolean indicating whether each
119
+ corresponding element is missing.
120
+
121
+ See Also
122
+ --------
123
+ notna : Boolean inverse of pandas.isna.
124
+ Series.isna : Detect missing values in a Series.
125
+ DataFrame.isna : Detect missing values in a DataFrame.
126
+ Index.isna : Detect missing values in an Index.
127
+
128
+ Examples
129
+ --------
130
+ Scalar arguments (including strings) result in a scalar boolean.
131
+
132
+ >>> pd.isna('dog')
133
+ False
134
+
135
+ >>> pd.isna(pd.NA)
136
+ True
137
+
138
+ >>> pd.isna(np.nan)
139
+ True
140
+
141
+ ndarrays result in an ndarray of booleans.
142
+
143
+ >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
144
+ >>> array
145
+ array([[ 1., nan, 3.],
146
+ [ 4., 5., nan]])
147
+ >>> pd.isna(array)
148
+ array([[False, True, False],
149
+ [False, False, True]])
150
+
151
+ For indexes, an ndarray of booleans is returned.
152
+
153
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
154
+ ... "2017-07-08"])
155
+ >>> index
156
+ DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
157
+ dtype='datetime64[ns]', freq=None)
158
+ >>> pd.isna(index)
159
+ array([False, False, True, False])
160
+
161
+ For Series and DataFrame, the same type is returned, containing booleans.
162
+
163
+ >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
164
+ >>> df
165
+ 0 1 2
166
+ 0 ant bee cat
167
+ 1 dog None fly
168
+ >>> pd.isna(df)
169
+ 0 1 2
170
+ 0 False False False
171
+ 1 False True False
172
+
173
+ >>> pd.isna(df[1])
174
+ 0 False
175
+ 1 True
176
+ Name: 1, dtype: bool
177
+ """
178
+ return _isna(obj)
179
+
180
+
181
+ isnull = isna
182
+
183
+
184
+ def _isna(obj, inf_as_na: bool = False):
185
+ """
186
+ Detect missing values, treating None, NaN or NA as null. Infinite
187
+ values will also be treated as null if inf_as_na is True.
188
+
189
+ Parameters
190
+ ----------
191
+ obj: ndarray or object value
192
+ Input array or scalar value.
193
+ inf_as_na: bool
194
+ Whether to treat infinity as null.
195
+
196
+ Returns
197
+ -------
198
+ boolean ndarray or boolean
199
+ """
200
+ if is_scalar(obj):
201
+ return libmissing.checknull(obj, inf_as_na=inf_as_na)
202
+ elif isinstance(obj, ABCMultiIndex):
203
+ raise NotImplementedError("isna is not defined for MultiIndex")
204
+ elif isinstance(obj, type):
205
+ return False
206
+ elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
207
+ return _isna_array(obj, inf_as_na=inf_as_na)
208
+ elif isinstance(obj, ABCIndex):
209
+ # Try to use cached isna, which also short-circuits for integer dtypes
210
+ # and avoids materializing RangeIndex._values
211
+ if not obj._can_hold_na:
212
+ return obj.isna()
213
+ return _isna_array(obj._values, inf_as_na=inf_as_na)
214
+
215
+ elif isinstance(obj, ABCSeries):
216
+ result = _isna_array(obj._values, inf_as_na=inf_as_na)
217
+ # box
218
+ result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
219
+ return result
220
+ elif isinstance(obj, ABCDataFrame):
221
+ return obj.isna()
222
+ elif isinstance(obj, list):
223
+ return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
224
+ elif hasattr(obj, "__array__"):
225
+ return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)
226
+ else:
227
+ return False
228
+
229
+
230
+ def _use_inf_as_na(key) -> None:
231
+ """
232
+ Option change callback for na/inf behaviour.
233
+
234
+ Choose which replacement for numpy.isnan / -numpy.isfinite is used.
235
+
236
+ Parameters
237
+ ----------
238
+ flag: bool
239
+ True means treat None, NaN, INF, -INF as null (old way),
240
+ False means None and NaN are null, but INF, -INF are not null
241
+ (new way).
242
+
243
+ Notes
244
+ -----
245
+ This approach to setting global module values is discussed and
246
+ approved here:
247
+
248
+ * https://stackoverflow.com/questions/4859217/
249
+ programmatically-creating-variables-in-python/4859312#4859312
250
+ """
251
+ inf_as_na = get_option(key)
252
+ globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
253
+ if inf_as_na:
254
+ globals()["nan_checker"] = lambda x: ~np.isfinite(x)
255
+ globals()["INF_AS_NA"] = True
256
+ else:
257
+ globals()["nan_checker"] = np.isnan
258
+ globals()["INF_AS_NA"] = False
259
+
260
+
261
+ def _isna_array(values: ArrayLike, inf_as_na: bool = False):
262
+ """
263
+ Return an array indicating which values of the input array are NaN / NA.
264
+
265
+ Parameters
266
+ ----------
267
+ obj: ndarray or ExtensionArray
268
+ The input array whose elements are to be checked.
269
+ inf_as_na: bool
270
+ Whether or not to treat infinite values as NA.
271
+
272
+ Returns
273
+ -------
274
+ array-like
275
+ Array of boolean values denoting the NA status of each element.
276
+ """
277
+ dtype = values.dtype
278
+
279
+ if not isinstance(values, np.ndarray):
280
+ # i.e. ExtensionArray
281
+ if inf_as_na and isinstance(dtype, CategoricalDtype):
282
+ result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
283
+ else:
284
+ # error: Incompatible types in assignment (expression has type
285
+ # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has
286
+ # type "ndarray[Any, dtype[bool_]]")
287
+ result = values.isna() # type: ignore[assignment]
288
+ elif isinstance(values, np.rec.recarray):
289
+ # GH 48526
290
+ result = _isna_recarray_dtype(values, inf_as_na=inf_as_na)
291
+ elif is_string_or_object_np_dtype(values.dtype):
292
+ result = _isna_string_dtype(values, inf_as_na=inf_as_na)
293
+ elif dtype.kind in "mM":
294
+ # this is the NaT pattern
295
+ result = values.view("i8") == iNaT
296
+ else:
297
+ if inf_as_na:
298
+ result = ~np.isfinite(values)
299
+ else:
300
+ result = np.isnan(values)
301
+
302
+ return result
303
+
304
+
305
+ def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]:
306
+ # Working around NumPy ticket 1542
307
+ dtype = values.dtype
308
+
309
+ if dtype.kind in ("S", "U"):
310
+ result = np.zeros(values.shape, dtype=bool)
311
+ else:
312
+ if values.ndim in {1, 2}:
313
+ result = libmissing.isnaobj(values, inf_as_na=inf_as_na)
314
+ else:
315
+ # 0-D, reached via e.g. mask_missing
316
+ result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)
317
+ result = result.reshape(values.shape)
318
+
319
+ return result
320
+
321
+
322
+ def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_:
323
+ is_inf_in_record = np.zeros(len(record_as_array), dtype=bool)
324
+ for i, value in enumerate(record_as_array):
325
+ is_element_inf = False
326
+ try:
327
+ is_element_inf = np.isinf(value)
328
+ except TypeError:
329
+ is_element_inf = False
330
+ is_inf_in_record[i] = is_element_inf
331
+
332
+ return np.any(is_inf_in_record)
333
+
334
+
335
+ def _isna_recarray_dtype(
336
+ values: np.rec.recarray, inf_as_na: bool
337
+ ) -> npt.NDArray[np.bool_]:
338
+ result = np.zeros(values.shape, dtype=bool)
339
+ for i, record in enumerate(values):
340
+ record_as_array = np.array(record.tolist())
341
+ does_record_contain_nan = isna_all(record_as_array)
342
+ does_record_contain_inf = False
343
+ if inf_as_na:
344
+ does_record_contain_inf = bool(_has_record_inf_value(record_as_array))
345
+ result[i] = np.any(
346
+ np.logical_or(does_record_contain_nan, does_record_contain_inf)
347
+ )
348
+
349
+ return result
350
+
351
+
352
+ @overload
353
+ def notna(obj: Scalar) -> bool:
354
+ ...
355
+
356
+
357
+ @overload
358
+ def notna(
359
+ obj: ArrayLike | Index | list,
360
+ ) -> npt.NDArray[np.bool_]:
361
+ ...
362
+
363
+
364
+ @overload
365
+ def notna(obj: NDFrameT) -> NDFrameT:
366
+ ...
367
+
368
+
369
+ # handle unions
370
+ @overload
371
+ def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
372
+ ...
373
+
374
+
375
+ @overload
376
+ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
377
+ ...
378
+
379
+
380
+ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
381
+ """
382
+ Detect non-missing values for an array-like object.
383
+
384
+ This function takes a scalar or array-like object and indicates
385
+ whether values are valid (not missing, which is ``NaN`` in numeric
386
+ arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
387
+
388
+ Parameters
389
+ ----------
390
+ obj : array-like or object value
391
+ Object to check for *not* null or *non*-missing values.
392
+
393
+ Returns
394
+ -------
395
+ bool or array-like of bool
396
+ For scalar input, returns a scalar boolean.
397
+ For array input, returns an array of boolean indicating whether each
398
+ corresponding element is valid.
399
+
400
+ See Also
401
+ --------
402
+ isna : Boolean inverse of pandas.notna.
403
+ Series.notna : Detect valid values in a Series.
404
+ DataFrame.notna : Detect valid values in a DataFrame.
405
+ Index.notna : Detect valid values in an Index.
406
+
407
+ Examples
408
+ --------
409
+ Scalar arguments (including strings) result in a scalar boolean.
410
+
411
+ >>> pd.notna('dog')
412
+ True
413
+
414
+ >>> pd.notna(pd.NA)
415
+ False
416
+
417
+ >>> pd.notna(np.nan)
418
+ False
419
+
420
+ ndarrays result in an ndarray of booleans.
421
+
422
+ >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
423
+ >>> array
424
+ array([[ 1., nan, 3.],
425
+ [ 4., 5., nan]])
426
+ >>> pd.notna(array)
427
+ array([[ True, False, True],
428
+ [ True, True, False]])
429
+
430
+ For indexes, an ndarray of booleans is returned.
431
+
432
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
433
+ ... "2017-07-08"])
434
+ >>> index
435
+ DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
436
+ dtype='datetime64[ns]', freq=None)
437
+ >>> pd.notna(index)
438
+ array([ True, True, False, True])
439
+
440
+ For Series and DataFrame, the same type is returned, containing booleans.
441
+
442
+ >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
443
+ >>> df
444
+ 0 1 2
445
+ 0 ant bee cat
446
+ 1 dog None fly
447
+ >>> pd.notna(df)
448
+ 0 1 2
449
+ 0 True True True
450
+ 1 True False True
451
+
452
+ >>> pd.notna(df[1])
453
+ 0 True
454
+ 1 False
455
+ Name: 1, dtype: bool
456
+ """
457
+ res = isna(obj)
458
+ if isinstance(res, bool):
459
+ return not res
460
+ return ~res
461
+
462
+
463
+ notnull = notna
464
+
465
+
466
+ def array_equivalent(
467
+ left,
468
+ right,
469
+ strict_nan: bool = False,
470
+ dtype_equal: bool = False,
471
+ ) -> bool:
472
+ """
473
+ True if two arrays, left and right, have equal non-NaN elements, and NaNs
474
+ in corresponding locations. False otherwise. It is assumed that left and
475
+ right are NumPy arrays of the same dtype. The behavior of this function
476
+ (particularly with respect to NaNs) is not defined if the dtypes are
477
+ different.
478
+
479
+ Parameters
480
+ ----------
481
+ left, right : ndarrays
482
+ strict_nan : bool, default False
483
+ If True, consider NaN and None to be different.
484
+ dtype_equal : bool, default False
485
+ Whether `left` and `right` are known to have the same dtype
486
+ according to `is_dtype_equal`. Some methods like `BlockManager.equals`.
487
+ require that the dtypes match. Setting this to ``True`` can improve
488
+ performance, but will give different results for arrays that are
489
+ equal but different dtypes.
490
+
491
+ Returns
492
+ -------
493
+ b : bool
494
+ Returns True if the arrays are equivalent.
495
+
496
+ Examples
497
+ --------
498
+ >>> array_equivalent(
499
+ ... np.array([1, 2, np.nan]),
500
+ ... np.array([1, 2, np.nan]))
501
+ True
502
+ >>> array_equivalent(
503
+ ... np.array([1, np.nan, 2]),
504
+ ... np.array([1, 2, np.nan]))
505
+ False
506
+ """
507
+ left, right = np.asarray(left), np.asarray(right)
508
+
509
+ # shape compat
510
+ if left.shape != right.shape:
511
+ return False
512
+
513
+ if dtype_equal:
514
+ # fastpath when we require that the dtypes match (Block.equals)
515
+ if left.dtype.kind in "fc":
516
+ return _array_equivalent_float(left, right)
517
+ elif left.dtype.kind in "mM":
518
+ return _array_equivalent_datetimelike(left, right)
519
+ elif is_string_or_object_np_dtype(left.dtype):
520
+ # TODO: fastpath for pandas' StringDtype
521
+ return _array_equivalent_object(left, right, strict_nan)
522
+ else:
523
+ return np.array_equal(left, right)
524
+
525
+ # Slow path when we allow comparing different dtypes.
526
+ # Object arrays can contain None, NaN and NaT.
527
+ # string dtypes must be come to this path for NumPy 1.7.1 compat
528
+ if left.dtype.kind in "OSU" or right.dtype.kind in "OSU":
529
+ # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]`
530
+ # or `in ("O", "S", "U")`
531
+ return _array_equivalent_object(left, right, strict_nan)
532
+
533
+ # NaNs can occur in float and complex arrays.
534
+ if left.dtype.kind in "fc":
535
+ if not (left.size and right.size):
536
+ return True
537
+ return ((left == right) | (isna(left) & isna(right))).all()
538
+
539
+ elif left.dtype.kind in "mM" or right.dtype.kind in "mM":
540
+ # datetime64, timedelta64, Period
541
+ if left.dtype != right.dtype:
542
+ return False
543
+
544
+ left = left.view("i8")
545
+ right = right.view("i8")
546
+
547
+ # if we have structured dtypes, compare first
548
+ if (
549
+ left.dtype.type is np.void or right.dtype.type is np.void
550
+ ) and left.dtype != right.dtype:
551
+ return False
552
+
553
+ return np.array_equal(left, right)
554
+
555
+
556
+ def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool:
557
+ return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())
558
+
559
+
560
+ def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray):
561
+ return np.array_equal(left.view("i8"), right.view("i8"))
562
+
563
+
564
+ def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool):
565
+ left = ensure_object(left)
566
+ right = ensure_object(right)
567
+
568
+ mask: npt.NDArray[np.bool_] | None = None
569
+ if strict_nan:
570
+ mask = isna(left) & isna(right)
571
+ if not mask.any():
572
+ mask = None
573
+
574
+ try:
575
+ if mask is None:
576
+ return lib.array_equivalent_object(left, right)
577
+ if not lib.array_equivalent_object(left[~mask], right[~mask]):
578
+ return False
579
+ left_remaining = left[mask]
580
+ right_remaining = right[mask]
581
+ except ValueError:
582
+ # can raise a ValueError if left and right cannot be
583
+ # compared (e.g. nested arrays)
584
+ left_remaining = left
585
+ right_remaining = right
586
+
587
+ for left_value, right_value in zip(left_remaining, right_remaining):
588
+ if left_value is NaT and right_value is not NaT:
589
+ return False
590
+
591
+ elif left_value is libmissing.NA and right_value is not libmissing.NA:
592
+ return False
593
+
594
+ elif isinstance(left_value, float) and np.isnan(left_value):
595
+ if not isinstance(right_value, float) or not np.isnan(right_value):
596
+ return False
597
+ else:
598
+ with warnings.catch_warnings():
599
+ # suppress numpy's "elementwise comparison failed"
600
+ warnings.simplefilter("ignore", DeprecationWarning)
601
+ try:
602
+ if np.any(np.asarray(left_value != right_value)):
603
+ return False
604
+ except TypeError as err:
605
+ if "boolean value of NA is ambiguous" in str(err):
606
+ return False
607
+ raise
608
+ except ValueError:
609
+ # numpy can raise a ValueError if left and right cannot be
610
+ # compared (e.g. nested arrays)
611
+ return False
612
+ return True
613
+
614
+
615
+ def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
616
+ """
617
+ ExtensionArray-compatible implementation of array_equivalent.
618
+ """
619
+ if left.dtype != right.dtype:
620
+ return False
621
+ elif isinstance(left, ABCExtensionArray):
622
+ return left.equals(right)
623
+ else:
624
+ return array_equivalent(left, right, dtype_equal=True)
625
+
626
+
627
+ def infer_fill_value(val):
628
+ """
629
+ infer the fill value for the nan/NaT from the provided
630
+ scalar/ndarray/list-like if we are a NaT, return the correct dtyped
631
+ element to provide proper block construction
632
+ """
633
+ if not is_list_like(val):
634
+ val = [val]
635
+ val = np.asarray(val)
636
+ if val.dtype.kind in "mM":
637
+ return np.array("NaT", dtype=val.dtype)
638
+ elif val.dtype == object:
639
+ dtype = lib.infer_dtype(ensure_object(val), skipna=False)
640
+ if dtype in ["datetime", "datetime64"]:
641
+ return np.array("NaT", dtype=DT64NS_DTYPE)
642
+ elif dtype in ["timedelta", "timedelta64"]:
643
+ return np.array("NaT", dtype=TD64NS_DTYPE)
644
+ return np.array(np.nan, dtype=object)
645
+ elif val.dtype.kind == "U":
646
+ return np.array(np.nan, dtype=val.dtype)
647
+ return np.nan
648
+
649
+
650
+ def construct_1d_array_from_inferred_fill_value(
651
+ value: object, length: int
652
+ ) -> ArrayLike:
653
+ # Find our empty_value dtype by constructing an array
654
+ # from our value and doing a .take on it
655
+ from pandas.core.algorithms import take_nd
656
+ from pandas.core.construction import sanitize_array
657
+ from pandas.core.indexes.base import Index
658
+
659
+ arr = sanitize_array(value, Index(range(1)), copy=False)
660
+ taker = -1 * np.ones(length, dtype=np.intp)
661
+ return take_nd(arr, taker)
662
+
663
+
664
+ def maybe_fill(arr: np.ndarray) -> np.ndarray:
665
+ """
666
+ Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype.
667
+ """
668
+ if arr.dtype.kind not in "iub":
669
+ arr.fill(np.nan)
670
+ return arr
671
+
672
+
673
+ def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
674
+ """
675
+ Return a dtype compat na value
676
+
677
+ Parameters
678
+ ----------
679
+ dtype : string / dtype
680
+ compat : bool, default True
681
+
682
+ Returns
683
+ -------
684
+ np.dtype or a pandas dtype
685
+
686
+ Examples
687
+ --------
688
+ >>> na_value_for_dtype(np.dtype('int64'))
689
+ 0
690
+ >>> na_value_for_dtype(np.dtype('int64'), compat=False)
691
+ nan
692
+ >>> na_value_for_dtype(np.dtype('float64'))
693
+ nan
694
+ >>> na_value_for_dtype(np.dtype('bool'))
695
+ False
696
+ >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
697
+ numpy.datetime64('NaT')
698
+ """
699
+
700
+ if isinstance(dtype, ExtensionDtype):
701
+ return dtype.na_value
702
+ elif dtype.kind in "mM":
703
+ unit = np.datetime_data(dtype)[0]
704
+ return dtype.type("NaT", unit)
705
+ elif dtype.kind == "f":
706
+ return np.nan
707
+ elif dtype.kind in "iu":
708
+ if compat:
709
+ return 0
710
+ return np.nan
711
+ elif dtype.kind == "b":
712
+ if compat:
713
+ return False
714
+ return np.nan
715
+ return np.nan
716
+
717
+
718
+ def remove_na_arraylike(arr: Series | Index | np.ndarray):
719
+ """
720
+ Return array-like containing only true/non-NaN values, possibly empty.
721
+ """
722
+ if isinstance(arr.dtype, ExtensionDtype):
723
+ return arr[notna(arr)]
724
+ else:
725
+ return arr[notna(np.asarray(arr))]
726
+
727
+
728
+ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
729
+ """
730
+ isna check that excludes incompatible dtypes
731
+
732
+ Parameters
733
+ ----------
734
+ obj : object
735
+ dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype
736
+
737
+ Returns
738
+ -------
739
+ bool
740
+ """
741
+ if not lib.is_scalar(obj) or not isna(obj):
742
+ return False
743
+ elif dtype.kind == "M":
744
+ if isinstance(dtype, np.dtype):
745
+ # i.e. not tzaware
746
+ return not isinstance(obj, (np.timedelta64, Decimal))
747
+ # we have to rule out tznaive dt64("NaT")
748
+ return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))
749
+ elif dtype.kind == "m":
750
+ return not isinstance(obj, (np.datetime64, Decimal))
751
+ elif dtype.kind in "iufc":
752
+ # Numeric
753
+ return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
754
+ elif dtype.kind == "b":
755
+ # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype)
756
+ return lib.is_float(obj) or obj is None or obj is libmissing.NA
757
+
758
+ elif dtype == _dtype_str:
759
+ # numpy string dtypes to avoid float np.nan
760
+ return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))
761
+
762
+ elif dtype == _dtype_object:
763
+ # This is needed for Categorical, but is kind of weird
764
+ return True
765
+
766
+ elif isinstance(dtype, PeriodDtype):
767
+ return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
768
+
769
+ elif isinstance(dtype, IntervalDtype):
770
+ return lib.is_float(obj) or obj is None or obj is libmissing.NA
771
+
772
+ elif isinstance(dtype, CategoricalDtype):
773
+ return is_valid_na_for_dtype(obj, dtype.categories.dtype)
774
+
775
+ # fallback, default to allowing NaN, None, NA, NaT
776
+ return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
777
+
778
+
779
+ def isna_all(arr: ArrayLike) -> bool:
780
+ """
781
+ Optimized equivalent to isna(arr).all()
782
+ """
783
+ total_len = len(arr)
784
+
785
+ # Usually it's enough to check but a small fraction of values to see if
786
+ # a block is NOT null, chunks should help in such cases.
787
+ # parameters 1000 and 40 were chosen arbitrarily
788
+ chunk_len = max(total_len // 40, 1000)
789
+
790
+ dtype = arr.dtype
791
+ if lib.is_np_dtype(dtype, "f"):
792
+ checker = nan_checker
793
+
794
+ elif (lib.is_np_dtype(dtype, "mM")) or isinstance(
795
+ dtype, (DatetimeTZDtype, PeriodDtype)
796
+ ):
797
+ # error: Incompatible types in assignment (expression has type
798
+ # "Callable[[Any], Any]", variable has type "ufunc")
799
+ checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment]
800
+
801
+ else:
802
+ # error: Incompatible types in assignment (expression has type "Callable[[Any],
803
+ # Any]", variable has type "ufunc")
804
+ checker = lambda x: _isna_array( # type: ignore[assignment]
805
+ x, inf_as_na=INF_AS_NA
806
+ )
807
+
808
+ return all(
809
+ checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
810
+ )
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas.core.groupby.generic import (
2
+ DataFrameGroupBy,
3
+ NamedAgg,
4
+ SeriesGroupBy,
5
+ )
6
+ from pandas.core.groupby.groupby import GroupBy
7
+ from pandas.core.groupby.grouper import Grouper
8
+
9
+ __all__ = [
10
+ "DataFrameGroupBy",
11
+ "NamedAgg",
12
+ "SeriesGroupBy",
13
+ "GroupBy",
14
+ "Grouper",
15
+ ]
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (451 Bytes). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc ADDED
Binary file (78.3 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc ADDED
Binary file (9.74 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc ADDED
Binary file (29 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/base.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provide basic components for groupby.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import dataclasses
7
+ from typing import TYPE_CHECKING
8
+
9
+ if TYPE_CHECKING:
10
+ from collections.abc import Hashable
11
+
12
+
13
+ @dataclasses.dataclass(order=True, frozen=True)
14
+ class OutputKey:
15
+ label: Hashable
16
+ position: int
17
+
18
+
19
+ # special case to prevent duplicate plots when catching exceptions when
20
+ # forwarding methods from NDFrames
21
+ plotting_methods = frozenset(["plot", "hist"])
22
+
23
+ # cythonized transformations or canned "agg+broadcast", which do not
24
+ # require postprocessing of the result by transform.
25
+ cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
26
+
27
+ # List of aggregation/reduction functions.
28
+ # These map each group to a single numeric value
29
+ reduction_kernels = frozenset(
30
+ [
31
+ "all",
32
+ "any",
33
+ "corrwith",
34
+ "count",
35
+ "first",
36
+ "idxmax",
37
+ "idxmin",
38
+ "last",
39
+ "max",
40
+ "mean",
41
+ "median",
42
+ "min",
43
+ "nunique",
44
+ "prod",
45
+ # as long as `quantile`'s signature accepts only
46
+ # a single quantile value, it's a reduction.
47
+ # GH#27526 might change that.
48
+ "quantile",
49
+ "sem",
50
+ "size",
51
+ "skew",
52
+ "std",
53
+ "sum",
54
+ "var",
55
+ ]
56
+ )
57
+
58
+ # List of transformation functions.
59
+ # a transformation is a function that, for each group,
60
+ # produces a result that has the same shape as the group.
61
+
62
+
63
+ transformation_kernels = frozenset(
64
+ [
65
+ "bfill",
66
+ "cumcount",
67
+ "cummax",
68
+ "cummin",
69
+ "cumprod",
70
+ "cumsum",
71
+ "diff",
72
+ "ffill",
73
+ "fillna",
74
+ "ngroup",
75
+ "pct_change",
76
+ "rank",
77
+ "shift",
78
+ ]
79
+ )
80
+
81
+ # these are all the public methods on Grouper which don't belong
82
+ # in either of the above lists
83
+ groupby_other_methods = frozenset(
84
+ [
85
+ "agg",
86
+ "aggregate",
87
+ "apply",
88
+ "boxplot",
89
+ # corr and cov return ngroups*ncolumns rows, so they
90
+ # are neither a transformation nor a reduction
91
+ "corr",
92
+ "cov",
93
+ "describe",
94
+ "dtypes",
95
+ "expanding",
96
+ "ewm",
97
+ "filter",
98
+ "get_group",
99
+ "groups",
100
+ "head",
101
+ "hist",
102
+ "indices",
103
+ "ndim",
104
+ "ngroups",
105
+ "nth",
106
+ "ohlc",
107
+ "pipe",
108
+ "plot",
109
+ "resample",
110
+ "rolling",
111
+ "tail",
112
+ "take",
113
+ "transform",
114
+ "sample",
115
+ "value_counts",
116
+ ]
117
+ )
118
+ # Valid values of `name` for `groupby.transform(name)`
119
+ # NOTE: do NOT edit this directly. New additions should be inserted
120
+ # into the appropriate list above.
121
+ transform_kernel_allowlist = reduction_kernels | transformation_kernels
videollama2/lib/python3.10/site-packages/pandas/core/groupby/categorical.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import numpy as np
4
+
5
+ from pandas.core.algorithms import unique1d
6
+ from pandas.core.arrays.categorical import (
7
+ Categorical,
8
+ CategoricalDtype,
9
+ recode_for_categories,
10
+ )
11
+
12
+
13
+ def recode_for_groupby(
14
+ c: Categorical, sort: bool, observed: bool
15
+ ) -> tuple[Categorical, Categorical | None]:
16
+ """
17
+ Code the categories to ensure we can groupby for categoricals.
18
+
19
+ If observed=True, we return a new Categorical with the observed
20
+ categories only.
21
+
22
+ If sort=False, return a copy of self, coded with categories as
23
+ returned by .unique(), followed by any categories not appearing in
24
+ the data. If sort=True, return self.
25
+
26
+ This method is needed solely to ensure the categorical index of the
27
+ GroupBy result has categories in the order of appearance in the data
28
+ (GH-8868).
29
+
30
+ Parameters
31
+ ----------
32
+ c : Categorical
33
+ sort : bool
34
+ The value of the sort parameter groupby was called with.
35
+ observed : bool
36
+ Account only for the observed values
37
+
38
+ Returns
39
+ -------
40
+ Categorical
41
+ If sort=False, the new categories are set to the order of
42
+ appearance in codes (unless ordered=True, in which case the
43
+ original order is preserved), followed by any unrepresented
44
+ categories in the original order.
45
+ Categorical or None
46
+ If we are observed, return the original categorical, otherwise None
47
+ """
48
+ # we only care about observed values
49
+ if observed:
50
+ # In cases with c.ordered, this is equivalent to
51
+ # return c.remove_unused_categories(), c
52
+
53
+ unique_codes = unique1d(c.codes)
54
+
55
+ take_codes = unique_codes[unique_codes != -1]
56
+ if sort:
57
+ take_codes = np.sort(take_codes)
58
+
59
+ # we recode according to the uniques
60
+ categories = c.categories.take(take_codes)
61
+ codes = recode_for_categories(c.codes, c.categories, categories)
62
+
63
+ # return a new categorical that maps our new codes
64
+ # and categories
65
+ dtype = CategoricalDtype(categories, ordered=c.ordered)
66
+ return Categorical._simple_new(codes, dtype=dtype), c
67
+
68
+ # Already sorted according to c.categories; all is fine
69
+ if sort:
70
+ return c, None
71
+
72
+ # sort=False should order groups in as-encountered order (GH-8868)
73
+
74
+ # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories
75
+ all_codes = np.arange(c.categories.nunique())
76
+ # GH 38140: exclude nan from indexer for categories
77
+ unique_notnan_codes = unique1d(c.codes[c.codes != -1])
78
+ if sort:
79
+ unique_notnan_codes = np.sort(unique_notnan_codes)
80
+ if len(all_codes) > len(unique_notnan_codes):
81
+ # GH 13179: All categories need to be present, even if missing from the data
82
+ missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True)
83
+ take_codes = np.concatenate((unique_notnan_codes, missing_codes))
84
+ else:
85
+ take_codes = unique_notnan_codes
86
+
87
+ return Categorical(c, c.unique().categories.take(take_codes)), None
videollama2/lib/python3.10/site-packages/pandas/core/groupby/generic.py ADDED
@@ -0,0 +1,2852 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Define the SeriesGroupBy and DataFrameGroupBy
3
+ classes that hold the groupby interfaces (and some implementations).
4
+
5
+ These are user facing as the result of the ``df.groupby(...)`` operations,
6
+ which here returns a DataFrameGroupBy object.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ from collections import abc
11
+ from functools import partial
12
+ from textwrap import dedent
13
+ from typing import (
14
+ TYPE_CHECKING,
15
+ Any,
16
+ Callable,
17
+ Literal,
18
+ NamedTuple,
19
+ TypeVar,
20
+ Union,
21
+ cast,
22
+ )
23
+ import warnings
24
+
25
+ import numpy as np
26
+
27
+ from pandas._libs import (
28
+ Interval,
29
+ lib,
30
+ )
31
+ from pandas._libs.hashtable import duplicated
32
+ from pandas.errors import SpecificationError
33
+ from pandas.util._decorators import (
34
+ Appender,
35
+ Substitution,
36
+ doc,
37
+ )
38
+ from pandas.util._exceptions import find_stack_level
39
+
40
+ from pandas.core.dtypes.common import (
41
+ ensure_int64,
42
+ is_bool,
43
+ is_dict_like,
44
+ is_integer_dtype,
45
+ is_list_like,
46
+ is_numeric_dtype,
47
+ is_scalar,
48
+ )
49
+ from pandas.core.dtypes.dtypes import (
50
+ CategoricalDtype,
51
+ IntervalDtype,
52
+ )
53
+ from pandas.core.dtypes.inference import is_hashable
54
+ from pandas.core.dtypes.missing import (
55
+ isna,
56
+ notna,
57
+ )
58
+
59
+ from pandas.core import algorithms
60
+ from pandas.core.apply import (
61
+ GroupByApply,
62
+ maybe_mangle_lambdas,
63
+ reconstruct_func,
64
+ validate_func_kwargs,
65
+ warn_alias_replacement,
66
+ )
67
+ import pandas.core.common as com
68
+ from pandas.core.frame import DataFrame
69
+ from pandas.core.groupby import (
70
+ base,
71
+ ops,
72
+ )
73
+ from pandas.core.groupby.groupby import (
74
+ GroupBy,
75
+ GroupByPlot,
76
+ _agg_template_frame,
77
+ _agg_template_series,
78
+ _apply_docs,
79
+ _transform_template,
80
+ )
81
+ from pandas.core.indexes.api import (
82
+ Index,
83
+ MultiIndex,
84
+ all_indexes_same,
85
+ default_index,
86
+ )
87
+ from pandas.core.series import Series
88
+ from pandas.core.sorting import get_group_index
89
+ from pandas.core.util.numba_ import maybe_use_numba
90
+
91
+ from pandas.plotting import boxplot_frame_groupby
92
+
93
+ if TYPE_CHECKING:
94
+ from collections.abc import (
95
+ Hashable,
96
+ Mapping,
97
+ Sequence,
98
+ )
99
+
100
+ from pandas._typing import (
101
+ ArrayLike,
102
+ Axis,
103
+ AxisInt,
104
+ CorrelationMethod,
105
+ FillnaOptions,
106
+ IndexLabel,
107
+ Manager,
108
+ Manager2D,
109
+ SingleManager,
110
+ TakeIndexer,
111
+ )
112
+
113
+ from pandas import Categorical
114
+ from pandas.core.generic import NDFrame
115
+
116
+ # TODO(typing) the return value on this callable should be any *scalar*.
117
+ AggScalar = Union[str, Callable[..., Any]]
118
+ # TODO: validate types on ScalarResult and move to _typing
119
+ # Blocked from using by https://github.com/python/mypy/issues/1484
120
+ # See note at _mangle_lambda_list
121
+ ScalarResult = TypeVar("ScalarResult")
122
+
123
+
124
+ class NamedAgg(NamedTuple):
125
+ """
126
+ Helper for column specific aggregation with control over output column names.
127
+
128
+ Subclass of typing.NamedTuple.
129
+
130
+ Parameters
131
+ ----------
132
+ column : Hashable
133
+ Column label in the DataFrame to apply aggfunc.
134
+ aggfunc : function or str
135
+ Function to apply to the provided column. If string, the name of a built-in
136
+ pandas function.
137
+
138
+ Examples
139
+ --------
140
+ >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]})
141
+ >>> agg_a = pd.NamedAgg(column="a", aggfunc="min")
142
+ >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x))
143
+ >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1)
144
+ result_a result_1
145
+ key
146
+ 1 -1 10.5
147
+ 2 1 12.0
148
+ """
149
+
150
+ column: Hashable
151
+ aggfunc: AggScalar
152
+
153
+
154
+ class SeriesGroupBy(GroupBy[Series]):
155
+ def _wrap_agged_manager(self, mgr: Manager) -> Series:
156
+ out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
157
+ out._name = self.obj.name
158
+ return out
159
+
160
+ def _get_data_to_aggregate(
161
+ self, *, numeric_only: bool = False, name: str | None = None
162
+ ) -> SingleManager:
163
+ ser = self._obj_with_exclusions
164
+ single = ser._mgr
165
+ if numeric_only and not is_numeric_dtype(ser.dtype):
166
+ # GH#41291 match Series behavior
167
+ kwd_name = "numeric_only"
168
+ raise TypeError(
169
+ f"Cannot use {kwd_name}=True with "
170
+ f"{type(self).__name__}.{name} and non-numeric dtypes."
171
+ )
172
+ return single
173
+
174
+ _agg_examples_doc = dedent(
175
+ """
176
+ Examples
177
+ --------
178
+ >>> s = pd.Series([1, 2, 3, 4])
179
+
180
+ >>> s
181
+ 0 1
182
+ 1 2
183
+ 2 3
184
+ 3 4
185
+ dtype: int64
186
+
187
+ >>> s.groupby([1, 1, 2, 2]).min()
188
+ 1 1
189
+ 2 3
190
+ dtype: int64
191
+
192
+ >>> s.groupby([1, 1, 2, 2]).agg('min')
193
+ 1 1
194
+ 2 3
195
+ dtype: int64
196
+
197
+ >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
198
+ min max
199
+ 1 1 2
200
+ 2 3 4
201
+
202
+ The output column names can be controlled by passing
203
+ the desired column names and aggregations as keyword arguments.
204
+
205
+ >>> s.groupby([1, 1, 2, 2]).agg(
206
+ ... minimum='min',
207
+ ... maximum='max',
208
+ ... )
209
+ minimum maximum
210
+ 1 1 2
211
+ 2 3 4
212
+
213
+ .. versionchanged:: 1.3.0
214
+
215
+ The resulting dtype will reflect the return value of the aggregating function.
216
+
217
+ >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
218
+ 1 1.0
219
+ 2 3.0
220
+ dtype: float64
221
+ """
222
+ )
223
+
224
+ @Appender(
225
+ _apply_docs["template"].format(
226
+ input="series", examples=_apply_docs["series_examples"]
227
+ )
228
+ )
229
+ def apply(self, func, *args, **kwargs) -> Series:
230
+ return super().apply(func, *args, **kwargs)
231
+
232
+ @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series")
233
+ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
234
+ relabeling = func is None
235
+ columns = None
236
+ if relabeling:
237
+ columns, func = validate_func_kwargs(kwargs)
238
+ kwargs = {}
239
+
240
+ if isinstance(func, str):
241
+ if maybe_use_numba(engine) and engine is not None:
242
+ # Not all agg functions support numba, only propagate numba kwargs
243
+ # if user asks for numba, and engine is not None
244
+ # (if engine is None, the called function will handle the case where
245
+ # numba is requested via the global option)
246
+ kwargs["engine"] = engine
247
+ if engine_kwargs is not None:
248
+ kwargs["engine_kwargs"] = engine_kwargs
249
+ return getattr(self, func)(*args, **kwargs)
250
+
251
+ elif isinstance(func, abc.Iterable):
252
+ # Catch instances of lists / tuples
253
+ # but not the class list / tuple itself.
254
+ func = maybe_mangle_lambdas(func)
255
+ kwargs["engine"] = engine
256
+ kwargs["engine_kwargs"] = engine_kwargs
257
+ ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
258
+ if relabeling:
259
+ # columns is not narrowed by mypy from relabeling flag
260
+ assert columns is not None # for mypy
261
+ ret.columns = columns
262
+ if not self.as_index:
263
+ ret = ret.reset_index()
264
+ return ret
265
+
266
+ else:
267
+ cyfunc = com.get_cython_func(func)
268
+ if cyfunc and not args and not kwargs:
269
+ warn_alias_replacement(self, func, cyfunc)
270
+ return getattr(self, cyfunc)()
271
+
272
+ if maybe_use_numba(engine):
273
+ return self._aggregate_with_numba(
274
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
275
+ )
276
+
277
+ if self.ngroups == 0:
278
+ # e.g. test_evaluate_with_empty_groups without any groups to
279
+ # iterate over, we have no output on which to do dtype
280
+ # inference. We default to using the existing dtype.
281
+ # xref GH#51445
282
+ obj = self._obj_with_exclusions
283
+ return self.obj._constructor(
284
+ [],
285
+ name=self.obj.name,
286
+ index=self._grouper.result_index,
287
+ dtype=obj.dtype,
288
+ )
289
+
290
+ if self._grouper.nkeys > 1:
291
+ return self._python_agg_general(func, *args, **kwargs)
292
+
293
+ try:
294
+ return self._python_agg_general(func, *args, **kwargs)
295
+ except KeyError:
296
+ # KeyError raised in test_groupby.test_basic is bc the func does
297
+ # a dictionary lookup on group.name, but group name is not
298
+ # pinned in _python_agg_general, only in _aggregate_named
299
+ result = self._aggregate_named(func, *args, **kwargs)
300
+
301
+ warnings.warn(
302
+ "Pinning the groupby key to each group in "
303
+ f"{type(self).__name__}.agg is deprecated, and cases that "
304
+ "relied on it will raise in a future version. "
305
+ "If your operation requires utilizing the groupby keys, "
306
+ "iterate over the groupby object instead.",
307
+ FutureWarning,
308
+ stacklevel=find_stack_level(),
309
+ )
310
+
311
+ # result is a dict whose keys are the elements of result_index
312
+ result = Series(result, index=self._grouper.result_index)
313
+ result = self._wrap_aggregated_output(result)
314
+ return result
315
+
316
+ agg = aggregate
317
+
318
+ def _python_agg_general(self, func, *args, **kwargs):
319
+ orig_func = func
320
+ func = com.is_builtin_func(func)
321
+ if orig_func != func:
322
+ alias = com._builtin_table_alias[func]
323
+ warn_alias_replacement(self, orig_func, alias)
324
+ f = lambda x: func(x, *args, **kwargs)
325
+
326
+ obj = self._obj_with_exclusions
327
+ result = self._grouper.agg_series(obj, f)
328
+ res = obj._constructor(result, name=obj.name)
329
+ return self._wrap_aggregated_output(res)
330
+
331
+ def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
332
+ if isinstance(arg, dict):
333
+ if self.as_index:
334
+ # GH 15931
335
+ raise SpecificationError("nested renamer is not supported")
336
+ else:
337
+ # GH#50684 - This accidentally worked in 1.x
338
+ msg = (
339
+ "Passing a dictionary to SeriesGroupBy.agg is deprecated "
340
+ "and will raise in a future version of pandas. Pass a list "
341
+ "of aggregations instead."
342
+ )
343
+ warnings.warn(
344
+ message=msg,
345
+ category=FutureWarning,
346
+ stacklevel=find_stack_level(),
347
+ )
348
+ arg = list(arg.items())
349
+ elif any(isinstance(x, (tuple, list)) for x in arg):
350
+ arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
351
+ else:
352
+ # list of functions / function names
353
+ columns = (com.get_callable_name(f) or f for f in arg)
354
+ arg = zip(columns, arg)
355
+
356
+ results: dict[base.OutputKey, DataFrame | Series] = {}
357
+ with com.temp_setattr(self, "as_index", True):
358
+ # Combine results using the index, need to adjust index after
359
+ # if as_index=False (GH#50724)
360
+ for idx, (name, func) in enumerate(arg):
361
+ key = base.OutputKey(label=name, position=idx)
362
+ results[key] = self.aggregate(func, *args, **kwargs)
363
+
364
+ if any(isinstance(x, DataFrame) for x in results.values()):
365
+ from pandas import concat
366
+
367
+ res_df = concat(
368
+ results.values(), axis=1, keys=[key.label for key in results]
369
+ )
370
+ return res_df
371
+
372
+ indexed_output = {key.position: val for key, val in results.items()}
373
+ output = self.obj._constructor_expanddim(indexed_output, index=None)
374
+ output.columns = Index(key.label for key in results)
375
+
376
+ return output
377
+
378
+ def _wrap_applied_output(
379
+ self,
380
+ data: Series,
381
+ values: list[Any],
382
+ not_indexed_same: bool = False,
383
+ is_transform: bool = False,
384
+ ) -> DataFrame | Series:
385
+ """
386
+ Wrap the output of SeriesGroupBy.apply into the expected result.
387
+
388
+ Parameters
389
+ ----------
390
+ data : Series
391
+ Input data for groupby operation.
392
+ values : List[Any]
393
+ Applied output for each group.
394
+ not_indexed_same : bool, default False
395
+ Whether the applied outputs are not indexed the same as the group axes.
396
+
397
+ Returns
398
+ -------
399
+ DataFrame or Series
400
+ """
401
+ if len(values) == 0:
402
+ # GH #6265
403
+ if is_transform:
404
+ # GH#47787 see test_group_on_empty_multiindex
405
+ res_index = data.index
406
+ else:
407
+ res_index = self._grouper.result_index
408
+
409
+ return self.obj._constructor(
410
+ [],
411
+ name=self.obj.name,
412
+ index=res_index,
413
+ dtype=data.dtype,
414
+ )
415
+ assert values is not None
416
+
417
+ if isinstance(values[0], dict):
418
+ # GH #823 #24880
419
+ index = self._grouper.result_index
420
+ res_df = self.obj._constructor_expanddim(values, index=index)
421
+ res_df = self._reindex_output(res_df)
422
+ # if self.observed is False,
423
+ # keep all-NaN rows created while re-indexing
424
+ res_ser = res_df.stack(future_stack=True)
425
+ res_ser.name = self.obj.name
426
+ return res_ser
427
+ elif isinstance(values[0], (Series, DataFrame)):
428
+ result = self._concat_objects(
429
+ values,
430
+ not_indexed_same=not_indexed_same,
431
+ is_transform=is_transform,
432
+ )
433
+ if isinstance(result, Series):
434
+ result.name = self.obj.name
435
+ if not self.as_index and not_indexed_same:
436
+ result = self._insert_inaxis_grouper(result)
437
+ result.index = default_index(len(result))
438
+ return result
439
+ else:
440
+ # GH #6265 #24880
441
+ result = self.obj._constructor(
442
+ data=values, index=self._grouper.result_index, name=self.obj.name
443
+ )
444
+ if not self.as_index:
445
+ result = self._insert_inaxis_grouper(result)
446
+ result.index = default_index(len(result))
447
+ return self._reindex_output(result)
448
+
449
+ def _aggregate_named(self, func, *args, **kwargs):
450
+ # Note: this is very similar to _aggregate_series_pure_python,
451
+ # but that does not pin group.name
452
+ result = {}
453
+ initialized = False
454
+
455
+ for name, group in self._grouper.get_iterator(
456
+ self._obj_with_exclusions, axis=self.axis
457
+ ):
458
+ # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations
459
+ object.__setattr__(group, "name", name)
460
+
461
+ output = func(group, *args, **kwargs)
462
+ output = ops.extract_result(output)
463
+ if not initialized:
464
+ # We only do this validation on the first iteration
465
+ ops.check_result_array(output, group.dtype)
466
+ initialized = True
467
+ result[name] = output
468
+
469
+ return result
470
+
471
+ __examples_series_doc = dedent(
472
+ """
473
+ >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],
474
+ ... index=["Falcon", "Falcon", "Parrot", "Parrot"],
475
+ ... name="Max Speed")
476
+ >>> grouped = ser.groupby([1, 1, 2, 2])
477
+ >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
478
+ Falcon 0.707107
479
+ Falcon -0.707107
480
+ Parrot 0.707107
481
+ Parrot -0.707107
482
+ Name: Max Speed, dtype: float64
483
+
484
+ Broadcast result of the transformation
485
+
486
+ >>> grouped.transform(lambda x: x.max() - x.min())
487
+ Falcon 40.0
488
+ Falcon 40.0
489
+ Parrot 10.0
490
+ Parrot 10.0
491
+ Name: Max Speed, dtype: float64
492
+
493
+ >>> grouped.transform("mean")
494
+ Falcon 370.0
495
+ Falcon 370.0
496
+ Parrot 25.0
497
+ Parrot 25.0
498
+ Name: Max Speed, dtype: float64
499
+
500
+ .. versionchanged:: 1.3.0
501
+
502
+ The resulting dtype will reflect the return value of the passed ``func``,
503
+ for example:
504
+
505
+ >>> grouped.transform(lambda x: x.astype(int).max())
506
+ Falcon 390
507
+ Falcon 390
508
+ Parrot 30
509
+ Parrot 30
510
+ Name: Max Speed, dtype: int64
511
+ """
512
+ )
513
+
514
+ @Substitution(klass="Series", example=__examples_series_doc)
515
+ @Appender(_transform_template)
516
+ def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
517
+ return self._transform(
518
+ func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
519
+ )
520
+
521
+ def _cython_transform(
522
+ self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs
523
+ ):
524
+ assert axis == 0 # handled by caller
525
+
526
+ obj = self._obj_with_exclusions
527
+
528
+ try:
529
+ result = self._grouper._cython_operation(
530
+ "transform", obj._values, how, axis, **kwargs
531
+ )
532
+ except NotImplementedError as err:
533
+ # e.g. test_groupby_raises_string
534
+ raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
535
+
536
+ return obj._constructor(result, index=self.obj.index, name=obj.name)
537
+
538
+ def _transform_general(
539
+ self, func: Callable, engine, engine_kwargs, *args, **kwargs
540
+ ) -> Series:
541
+ """
542
+ Transform with a callable `func`.
543
+ """
544
+ if maybe_use_numba(engine):
545
+ return self._transform_with_numba(
546
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
547
+ )
548
+ assert callable(func)
549
+ klass = type(self.obj)
550
+
551
+ results = []
552
+ for name, group in self._grouper.get_iterator(
553
+ self._obj_with_exclusions, axis=self.axis
554
+ ):
555
+ # this setattr is needed for test_transform_lambda_with_datetimetz
556
+ object.__setattr__(group, "name", name)
557
+ res = func(group, *args, **kwargs)
558
+
559
+ results.append(klass(res, index=group.index))
560
+
561
+ # check for empty "results" to avoid concat ValueError
562
+ if results:
563
+ from pandas.core.reshape.concat import concat
564
+
565
+ concatenated = concat(results)
566
+ result = self._set_result_index_ordered(concatenated)
567
+ else:
568
+ result = self.obj._constructor(dtype=np.float64)
569
+
570
+ result.name = self.obj.name
571
+ return result
572
+
573
+ def filter(self, func, dropna: bool = True, *args, **kwargs):
574
+ """
575
+ Filter elements from groups that don't satisfy a criterion.
576
+
577
+ Elements from groups are filtered if they do not satisfy the
578
+ boolean criterion specified by func.
579
+
580
+ Parameters
581
+ ----------
582
+ func : function
583
+ Criterion to apply to each group. Should return True or False.
584
+ dropna : bool
585
+ Drop groups that do not pass the filter. True by default; if False,
586
+ groups that evaluate False are filled with NaNs.
587
+
588
+ Returns
589
+ -------
590
+ Series
591
+
592
+ Notes
593
+ -----
594
+ Functions that mutate the passed object can produce unexpected
595
+ behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
596
+ for more details.
597
+
598
+ Examples
599
+ --------
600
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
601
+ ... 'foo', 'bar'],
602
+ ... 'B' : [1, 2, 3, 4, 5, 6],
603
+ ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
604
+ >>> grouped = df.groupby('A')
605
+ >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
606
+ 1 2
607
+ 3 4
608
+ 5 6
609
+ Name: B, dtype: int64
610
+ """
611
+ if isinstance(func, str):
612
+ wrapper = lambda x: getattr(x, func)(*args, **kwargs)
613
+ else:
614
+ wrapper = lambda x: func(x, *args, **kwargs)
615
+
616
+ # Interpret np.nan as False.
617
+ def true_and_notna(x) -> bool:
618
+ b = wrapper(x)
619
+ return notna(b) and b
620
+
621
+ try:
622
+ indices = [
623
+ self._get_index(name)
624
+ for name, group in self._grouper.get_iterator(
625
+ self._obj_with_exclusions, axis=self.axis
626
+ )
627
+ if true_and_notna(group)
628
+ ]
629
+ except (ValueError, TypeError) as err:
630
+ raise TypeError("the filter must return a boolean result") from err
631
+
632
+ filtered = self._apply_filter(indices, dropna)
633
+ return filtered
634
+
635
+ def nunique(self, dropna: bool = True) -> Series | DataFrame:
636
+ """
637
+ Return number of unique elements in the group.
638
+
639
+ Returns
640
+ -------
641
+ Series
642
+ Number of unique values within each group.
643
+
644
+ Examples
645
+ --------
646
+ For SeriesGroupby:
647
+
648
+ >>> lst = ['a', 'a', 'b', 'b']
649
+ >>> ser = pd.Series([1, 2, 3, 3], index=lst)
650
+ >>> ser
651
+ a 1
652
+ a 2
653
+ b 3
654
+ b 3
655
+ dtype: int64
656
+ >>> ser.groupby(level=0).nunique()
657
+ a 2
658
+ b 1
659
+ dtype: int64
660
+
661
+ For Resampler:
662
+
663
+ >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex(
664
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
665
+ >>> ser
666
+ 2023-01-01 1
667
+ 2023-01-15 2
668
+ 2023-02-01 3
669
+ 2023-02-15 3
670
+ dtype: int64
671
+ >>> ser.resample('MS').nunique()
672
+ 2023-01-01 2
673
+ 2023-02-01 1
674
+ Freq: MS, dtype: int64
675
+ """
676
+ ids, _, ngroups = self._grouper.group_info
677
+ val = self.obj._values
678
+ codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)
679
+
680
+ if self._grouper.has_dropped_na:
681
+ mask = ids >= 0
682
+ ids = ids[mask]
683
+ codes = codes[mask]
684
+
685
+ group_index = get_group_index(
686
+ labels=[ids, codes],
687
+ shape=(ngroups, len(uniques)),
688
+ sort=False,
689
+ xnull=dropna,
690
+ )
691
+
692
+ if dropna:
693
+ mask = group_index >= 0
694
+ if (~mask).any():
695
+ ids = ids[mask]
696
+ group_index = group_index[mask]
697
+
698
+ mask = duplicated(group_index, "first")
699
+ res = np.bincount(ids[~mask], minlength=ngroups)
700
+ res = ensure_int64(res)
701
+
702
+ ri = self._grouper.result_index
703
+ result: Series | DataFrame = self.obj._constructor(
704
+ res, index=ri, name=self.obj.name
705
+ )
706
+ if not self.as_index:
707
+ result = self._insert_inaxis_grouper(result)
708
+ result.index = default_index(len(result))
709
+ return self._reindex_output(result, fill_value=0)
710
+
711
+ @doc(Series.describe)
712
+ def describe(self, percentiles=None, include=None, exclude=None) -> Series:
713
+ return super().describe(
714
+ percentiles=percentiles, include=include, exclude=exclude
715
+ )
716
+
717
+ def value_counts(
718
+ self,
719
+ normalize: bool = False,
720
+ sort: bool = True,
721
+ ascending: bool = False,
722
+ bins=None,
723
+ dropna: bool = True,
724
+ ) -> Series | DataFrame:
725
+ name = "proportion" if normalize else "count"
726
+
727
+ if bins is None:
728
+ result = self._value_counts(
729
+ normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
730
+ )
731
+ result.name = name
732
+ return result
733
+
734
+ from pandas.core.reshape.merge import get_join_indexers
735
+ from pandas.core.reshape.tile import cut
736
+
737
+ ids, _, _ = self._grouper.group_info
738
+ val = self.obj._values
739
+
740
+ index_names = self._grouper.names + [self.obj.name]
741
+
742
+ if isinstance(val.dtype, CategoricalDtype) or (
743
+ bins is not None and not np.iterable(bins)
744
+ ):
745
+ # scalar bins cannot be done at top level
746
+ # in a backward compatible way
747
+ # GH38672 relates to categorical dtype
748
+ ser = self.apply(
749
+ Series.value_counts,
750
+ normalize=normalize,
751
+ sort=sort,
752
+ ascending=ascending,
753
+ bins=bins,
754
+ )
755
+ ser.name = name
756
+ ser.index.names = index_names
757
+ return ser
758
+
759
+ # groupby removes null keys from groupings
760
+ mask = ids != -1
761
+ ids, val = ids[mask], val[mask]
762
+
763
+ lab: Index | np.ndarray
764
+ if bins is None:
765
+ lab, lev = algorithms.factorize(val, sort=True)
766
+ llab = lambda lab, inc: lab[inc]
767
+ else:
768
+ # lab is a Categorical with categories an IntervalIndex
769
+ cat_ser = cut(Series(val, copy=False), bins, include_lowest=True)
770
+ cat_obj = cast("Categorical", cat_ser._values)
771
+ lev = cat_obj.categories
772
+ lab = lev.take(
773
+ cat_obj.codes,
774
+ allow_fill=True,
775
+ fill_value=lev._na_value,
776
+ )
777
+ llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
778
+
779
+ if isinstance(lab.dtype, IntervalDtype):
780
+ # TODO: should we do this inside II?
781
+ lab_interval = cast(Interval, lab)
782
+
783
+ sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))
784
+ else:
785
+ sorter = np.lexsort((lab, ids))
786
+
787
+ ids, lab = ids[sorter], lab[sorter]
788
+
789
+ # group boundaries are where group ids change
790
+ idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
791
+ idx = np.r_[0, idchanges]
792
+ if not len(ids):
793
+ idx = idchanges
794
+
795
+ # new values are where sorted labels change
796
+ lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
797
+ inc = np.r_[True, lchanges]
798
+ if not len(val):
799
+ inc = lchanges
800
+ inc[idx] = True # group boundaries are also new values
801
+ out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
802
+
803
+ # num. of times each group should be repeated
804
+ rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
805
+
806
+ # multi-index components
807
+ codes = self._grouper.reconstructed_codes
808
+ codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
809
+ levels = [ping._group_index for ping in self._grouper.groupings] + [lev]
810
+
811
+ if dropna:
812
+ mask = codes[-1] != -1
813
+ if mask.all():
814
+ dropna = False
815
+ else:
816
+ out, codes = out[mask], [level_codes[mask] for level_codes in codes]
817
+
818
+ if normalize:
819
+ out = out.astype("float")
820
+ d = np.diff(np.r_[idx, len(ids)])
821
+ if dropna:
822
+ m = ids[lab == -1]
823
+ np.add.at(d, m, -1)
824
+ acc = rep(d)[mask]
825
+ else:
826
+ acc = rep(d)
827
+ out /= acc
828
+
829
+ if sort and bins is None:
830
+ cat = ids[inc][mask] if dropna else ids[inc]
831
+ sorter = np.lexsort((out if ascending else -out, cat))
832
+ out, codes[-1] = out[sorter], codes[-1][sorter]
833
+
834
+ if bins is not None:
835
+ # for compat. with libgroupby.value_counts need to ensure every
836
+ # bin is present at every index level, null filled with zeros
837
+ diff = np.zeros(len(out), dtype="bool")
838
+ for level_codes in codes[:-1]:
839
+ diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
840
+
841
+ ncat, nbin = diff.sum(), len(levels[-1])
842
+
843
+ left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
844
+
845
+ right = [diff.cumsum() - 1, codes[-1]]
846
+
847
+ # error: Argument 1 to "get_join_indexers" has incompatible type
848
+ # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
849
+ # ndarray[Any, Any]], Index, Series]]
850
+ _, idx = get_join_indexers(
851
+ left, right, sort=False, how="left" # type: ignore[arg-type]
852
+ )
853
+ if idx is not None:
854
+ out = np.where(idx != -1, out[idx], 0)
855
+
856
+ if sort:
857
+ sorter = np.lexsort((out if ascending else -out, left[0]))
858
+ out, left[-1] = out[sorter], left[-1][sorter]
859
+
860
+ # build the multi-index w/ full levels
861
+ def build_codes(lev_codes: np.ndarray) -> np.ndarray:
862
+ return np.repeat(lev_codes[diff], nbin)
863
+
864
+ codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
865
+ codes.append(left[-1])
866
+
867
+ mi = MultiIndex(
868
+ levels=levels, codes=codes, names=index_names, verify_integrity=False
869
+ )
870
+
871
+ if is_integer_dtype(out.dtype):
872
+ out = ensure_int64(out)
873
+ result = self.obj._constructor(out, index=mi, name=name)
874
+ if not self.as_index:
875
+ result = result.reset_index()
876
+ return result
877
+
878
+ def fillna(
879
+ self,
880
+ value: object | ArrayLike | None = None,
881
+ method: FillnaOptions | None = None,
882
+ axis: Axis | None | lib.NoDefault = lib.no_default,
883
+ inplace: bool = False,
884
+ limit: int | None = None,
885
+ downcast: dict | None | lib.NoDefault = lib.no_default,
886
+ ) -> Series | None:
887
+ """
888
+ Fill NA/NaN values using the specified method within groups.
889
+
890
+ .. deprecated:: 2.2.0
891
+ This method is deprecated and will be removed in a future version.
892
+ Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill`
893
+ for forward or backward filling instead. If you want to fill with a
894
+ single value, use :meth:`Series.fillna` instead.
895
+
896
+ Parameters
897
+ ----------
898
+ value : scalar, dict, Series, or DataFrame
899
+ Value to use to fill holes (e.g. 0), alternately a
900
+ dict/Series/DataFrame of values specifying which value to use for
901
+ each index (for a Series) or column (for a DataFrame). Values not
902
+ in the dict/Series/DataFrame will not be filled. This value cannot
903
+ be a list. Users wanting to use the ``value`` argument and not ``method``
904
+ should prefer :meth:`.Series.fillna` as this
905
+ will produce the same result and be more performant.
906
+ method : {{'bfill', 'ffill', None}}, default None
907
+ Method to use for filling holes. ``'ffill'`` will propagate
908
+ the last valid observation forward within a group.
909
+ ``'bfill'`` will use next valid observation to fill the gap.
910
+ axis : {0 or 'index', 1 or 'columns'}
911
+ Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`.
912
+ inplace : bool, default False
913
+ Broken. Do not set to True.
914
+ limit : int, default None
915
+ If method is specified, this is the maximum number of consecutive
916
+ NaN values to forward/backward fill within a group. In other words,
917
+ if there is a gap with more than this number of consecutive NaNs,
918
+ it will only be partially filled. If method is not specified, this is the
919
+ maximum number of entries along the entire axis where NaNs will be
920
+ filled. Must be greater than 0 if not None.
921
+ downcast : dict, default is None
922
+ A dict of item->dtype of what to downcast if possible,
923
+ or the string 'infer' which will try to downcast to an appropriate
924
+ equal type (e.g. float64 to int64 if possible).
925
+
926
+ Returns
927
+ -------
928
+ Series
929
+ Object with missing values filled within groups.
930
+
931
+ See Also
932
+ --------
933
+ ffill : Forward fill values within a group.
934
+ bfill : Backward fill values within a group.
935
+
936
+ Examples
937
+ --------
938
+ For SeriesGroupBy:
939
+
940
+ >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse']
941
+ >>> ser = pd.Series([1, None, None, 2, None], index=lst)
942
+ >>> ser
943
+ cat 1.0
944
+ cat NaN
945
+ cat NaN
946
+ mouse 2.0
947
+ mouse NaN
948
+ dtype: float64
949
+ >>> ser.groupby(level=0).fillna(0, limit=1)
950
+ cat 1.0
951
+ cat 0.0
952
+ cat NaN
953
+ mouse 2.0
954
+ mouse 0.0
955
+ dtype: float64
956
+ """
957
+ warnings.warn(
958
+ f"{type(self).__name__}.fillna is deprecated and "
959
+ "will be removed in a future version. Use obj.ffill() or obj.bfill() "
960
+ "for forward or backward filling instead. If you want to fill with a "
961
+ f"single value, use {type(self.obj).__name__}.fillna instead",
962
+ FutureWarning,
963
+ stacklevel=find_stack_level(),
964
+ )
965
+ result = self._op_via_apply(
966
+ "fillna",
967
+ value=value,
968
+ method=method,
969
+ axis=axis,
970
+ inplace=inplace,
971
+ limit=limit,
972
+ downcast=downcast,
973
+ )
974
+ return result
975
+
976
+ def take(
977
+ self,
978
+ indices: TakeIndexer,
979
+ axis: Axis | lib.NoDefault = lib.no_default,
980
+ **kwargs,
981
+ ) -> Series:
982
+ """
983
+ Return the elements in the given *positional* indices in each group.
984
+
985
+ This means that we are not indexing according to actual values in
986
+ the index attribute of the object. We are indexing according to the
987
+ actual position of the element in the object.
988
+
989
+ If a requested index does not exist for some group, this method will raise.
990
+ To get similar behavior that ignores indices that don't exist, see
991
+ :meth:`.SeriesGroupBy.nth`.
992
+
993
+ Parameters
994
+ ----------
995
+ indices : array-like
996
+ An array of ints indicating which positions to take in each group.
997
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
998
+ The axis on which to select elements. ``0`` means that we are
999
+ selecting rows, ``1`` means that we are selecting columns.
1000
+ For `SeriesGroupBy` this parameter is unused and defaults to 0.
1001
+
1002
+ .. deprecated:: 2.1.0
1003
+ For axis=1, operate on the underlying object instead. Otherwise
1004
+ the axis keyword is not necessary.
1005
+
1006
+ **kwargs
1007
+ For compatibility with :meth:`numpy.take`. Has no effect on the
1008
+ output.
1009
+
1010
+ Returns
1011
+ -------
1012
+ Series
1013
+ A Series containing the elements taken from each group.
1014
+
1015
+ See Also
1016
+ --------
1017
+ Series.take : Take elements from a Series along an axis.
1018
+ Series.loc : Select a subset of a DataFrame by labels.
1019
+ Series.iloc : Select a subset of a DataFrame by positions.
1020
+ numpy.take : Take elements from an array along an axis.
1021
+ SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist.
1022
+
1023
+ Examples
1024
+ --------
1025
+ >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
1026
+ ... ('parrot', 'bird', 24.0),
1027
+ ... ('lion', 'mammal', 80.5),
1028
+ ... ('monkey', 'mammal', np.nan),
1029
+ ... ('rabbit', 'mammal', 15.0)],
1030
+ ... columns=['name', 'class', 'max_speed'],
1031
+ ... index=[4, 3, 2, 1, 0])
1032
+ >>> df
1033
+ name class max_speed
1034
+ 4 falcon bird 389.0
1035
+ 3 parrot bird 24.0
1036
+ 2 lion mammal 80.5
1037
+ 1 monkey mammal NaN
1038
+ 0 rabbit mammal 15.0
1039
+ >>> gb = df["name"].groupby([1, 1, 2, 2, 2])
1040
+
1041
+ Take elements at positions 0 and 1 along the axis 0 in each group (default).
1042
+
1043
+ >>> gb.take([0, 1])
1044
+ 1 4 falcon
1045
+ 3 parrot
1046
+ 2 2 lion
1047
+ 1 monkey
1048
+ Name: name, dtype: object
1049
+
1050
+ We may take elements using negative integers for positive indices,
1051
+ starting from the end of the object, just like with Python lists.
1052
+
1053
+ >>> gb.take([-1, -2])
1054
+ 1 3 parrot
1055
+ 4 falcon
1056
+ 2 0 rabbit
1057
+ 1 monkey
1058
+ Name: name, dtype: object
1059
+ """
1060
+ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
1061
+ return result
1062
+
1063
+ def skew(
1064
+ self,
1065
+ axis: Axis | lib.NoDefault = lib.no_default,
1066
+ skipna: bool = True,
1067
+ numeric_only: bool = False,
1068
+ **kwargs,
1069
+ ) -> Series:
1070
+ """
1071
+ Return unbiased skew within groups.
1072
+
1073
+ Normalized by N-1.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
1078
+ Axis for the function to be applied on.
1079
+ This parameter is only for compatibility with DataFrame and is unused.
1080
+
1081
+ .. deprecated:: 2.1.0
1082
+ For axis=1, operate on the underlying object instead. Otherwise
1083
+ the axis keyword is not necessary.
1084
+
1085
+ skipna : bool, default True
1086
+ Exclude NA/null values when computing the result.
1087
+
1088
+ numeric_only : bool, default False
1089
+ Include only float, int, boolean columns. Not implemented for Series.
1090
+
1091
+ **kwargs
1092
+ Additional keyword arguments to be passed to the function.
1093
+
1094
+ Returns
1095
+ -------
1096
+ Series
1097
+
1098
+ See Also
1099
+ --------
1100
+ Series.skew : Return unbiased skew over requested axis.
1101
+
1102
+ Examples
1103
+ --------
1104
+ >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.],
1105
+ ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon',
1106
+ ... 'Parrot', 'Parrot', 'Parrot'],
1107
+ ... name="Max Speed")
1108
+ >>> ser
1109
+ Falcon 390.0
1110
+ Falcon 350.0
1111
+ Falcon 357.0
1112
+ Falcon NaN
1113
+ Parrot 22.0
1114
+ Parrot 20.0
1115
+ Parrot 30.0
1116
+ Name: Max Speed, dtype: float64
1117
+ >>> ser.groupby(level=0).skew()
1118
+ Falcon 1.525174
1119
+ Parrot 1.457863
1120
+ Name: Max Speed, dtype: float64
1121
+ >>> ser.groupby(level=0).skew(skipna=False)
1122
+ Falcon NaN
1123
+ Parrot 1.457863
1124
+ Name: Max Speed, dtype: float64
1125
+ """
1126
+ if axis is lib.no_default:
1127
+ axis = 0
1128
+
1129
+ if axis != 0:
1130
+ result = self._op_via_apply(
1131
+ "skew",
1132
+ axis=axis,
1133
+ skipna=skipna,
1134
+ numeric_only=numeric_only,
1135
+ **kwargs,
1136
+ )
1137
+ return result
1138
+
1139
+ def alt(obj):
1140
+ # This should not be reached since the cython path should raise
1141
+ # TypeError and not NotImplementedError.
1142
+ raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")
1143
+
1144
+ return self._cython_agg_general(
1145
+ "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
1146
+ )
1147
+
1148
+ @property
1149
+ @doc(Series.plot.__doc__)
1150
+ def plot(self) -> GroupByPlot:
1151
+ result = GroupByPlot(self)
1152
+ return result
1153
+
1154
+ @doc(Series.nlargest.__doc__)
1155
+ def nlargest(
1156
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
1157
+ ) -> Series:
1158
+ f = partial(Series.nlargest, n=n, keep=keep)
1159
+ data = self._obj_with_exclusions
1160
+ # Don't change behavior if result index happens to be the same, i.e.
1161
+ # already ordered and n >= all group sizes.
1162
+ result = self._python_apply_general(f, data, not_indexed_same=True)
1163
+ return result
1164
+
1165
+ @doc(Series.nsmallest.__doc__)
1166
+ def nsmallest(
1167
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
1168
+ ) -> Series:
1169
+ f = partial(Series.nsmallest, n=n, keep=keep)
1170
+ data = self._obj_with_exclusions
1171
+ # Don't change behavior if result index happens to be the same, i.e.
1172
+ # already ordered and n >= all group sizes.
1173
+ result = self._python_apply_general(f, data, not_indexed_same=True)
1174
+ return result
1175
+
1176
+ @doc(Series.idxmin.__doc__)
1177
+ def idxmin(
1178
+ self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
1179
+ ) -> Series:
1180
+ return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna)
1181
+
1182
+ @doc(Series.idxmax.__doc__)
1183
+ def idxmax(
1184
+ self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
1185
+ ) -> Series:
1186
+ return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna)
1187
+
1188
+ @doc(Series.corr.__doc__)
1189
+ def corr(
1190
+ self,
1191
+ other: Series,
1192
+ method: CorrelationMethod = "pearson",
1193
+ min_periods: int | None = None,
1194
+ ) -> Series:
1195
+ result = self._op_via_apply(
1196
+ "corr", other=other, method=method, min_periods=min_periods
1197
+ )
1198
+ return result
1199
+
1200
+ @doc(Series.cov.__doc__)
1201
+ def cov(
1202
+ self, other: Series, min_periods: int | None = None, ddof: int | None = 1
1203
+ ) -> Series:
1204
+ result = self._op_via_apply(
1205
+ "cov", other=other, min_periods=min_periods, ddof=ddof
1206
+ )
1207
+ return result
1208
+
1209
    @property
    def is_monotonic_increasing(self) -> Series:
        """
        Return whether each group's values are monotonically increasing.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
        >>> s.groupby(level=0).is_monotonic_increasing
        Falcon    False
        Parrot     True
        dtype: bool
        """
        # Per-group delegation to Series.is_monotonic_increasing.
        return self.apply(lambda ser: ser.is_monotonic_increasing)
1227
+
1228
    @property
    def is_monotonic_decreasing(self) -> Series:
        """
        Return whether each group's values are monotonically decreasing.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
        >>> s.groupby(level=0).is_monotonic_decreasing
        Falcon     True
        Parrot    False
        dtype: bool
        """
        # Per-group delegation to Series.is_monotonic_decreasing.
        return self.apply(lambda ser: ser.is_monotonic_decreasing)
1246
+
1247
+ @doc(Series.hist.__doc__)
1248
+ def hist(
1249
+ self,
1250
+ by=None,
1251
+ ax=None,
1252
+ grid: bool = True,
1253
+ xlabelsize: int | None = None,
1254
+ xrot: float | None = None,
1255
+ ylabelsize: int | None = None,
1256
+ yrot: float | None = None,
1257
+ figsize: tuple[int, int] | None = None,
1258
+ bins: int | Sequence[int] = 10,
1259
+ backend: str | None = None,
1260
+ legend: bool = False,
1261
+ **kwargs,
1262
+ ):
1263
+ result = self._op_via_apply(
1264
+ "hist",
1265
+ by=by,
1266
+ ax=ax,
1267
+ grid=grid,
1268
+ xlabelsize=xlabelsize,
1269
+ xrot=xrot,
1270
+ ylabelsize=ylabelsize,
1271
+ yrot=yrot,
1272
+ figsize=figsize,
1273
+ bins=bins,
1274
+ backend=backend,
1275
+ legend=legend,
1276
+ **kwargs,
1277
+ )
1278
+ return result
1279
+
1280
    @property
    @doc(Series.dtype.__doc__)
    def dtype(self) -> Series:
        # One dtype entry per group, via Series.dtype.
        return self.apply(lambda ser: ser.dtype)
1284
+
1285
    def unique(self) -> Series:
        """
        Return unique values for each group.

        It returns unique values for each of the grouped values. Returned in
        order of appearance. Hash table-based unique, therefore does NOT sort.

        Returns
        -------
        Series
            Unique values for each of the grouped values.

        See Also
        --------
        Series.unique : Return unique values of Series object.

        Examples
        --------
        >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1),
        ...                    ('Beagle', 'dog', 15.2),
        ...                    ('Chihuahua', 'dog', 6.9),
        ...                    ('Persian', 'cat', 9.2),
        ...                    ('Chihuahua', 'dog', 7),
        ...                    ('Persian', 'cat', 8.8)],
        ...                   columns=['breed', 'animal', 'height_in'])
        >>> df
               breed     animal   height_in
        0  Chihuahua        dog         6.1
        1     Beagle        dog        15.2
        2  Chihuahua        dog         6.9
        3    Persian        cat         9.2
        4  Chihuahua        dog         7.0
        5    Persian        cat         8.8
        >>> ser = df.groupby('animal')['breed'].unique()
        >>> ser
        animal
        cat              [Persian]
        dog    [Chihuahua, Beagle]
        Name: breed, dtype: object
        """
        # Per-group Series.unique via the generic apply machinery.
        result = self._op_via_apply("unique")
        return result
1327
+
1328
+
1329
+ class DataFrameGroupBy(GroupBy[DataFrame]):
1330
+ _agg_examples_doc = dedent(
1331
+ """
1332
+ Examples
1333
+ --------
1334
+ >>> data = {"A": [1, 1, 2, 2],
1335
+ ... "B": [1, 2, 3, 4],
1336
+ ... "C": [0.362838, 0.227877, 1.267767, -0.562860]}
1337
+ >>> df = pd.DataFrame(data)
1338
+ >>> df
1339
+ A B C
1340
+ 0 1 1 0.362838
1341
+ 1 1 2 0.227877
1342
+ 2 2 3 1.267767
1343
+ 3 2 4 -0.562860
1344
+
1345
+ The aggregation is for each column.
1346
+
1347
+ >>> df.groupby('A').agg('min')
1348
+ B C
1349
+ A
1350
+ 1 1 0.227877
1351
+ 2 3 -0.562860
1352
+
1353
+ Multiple aggregations
1354
+
1355
+ >>> df.groupby('A').agg(['min', 'max'])
1356
+ B C
1357
+ min max min max
1358
+ A
1359
+ 1 1 2 0.227877 0.362838
1360
+ 2 3 4 -0.562860 1.267767
1361
+
1362
+ Select a column for aggregation
1363
+
1364
+ >>> df.groupby('A').B.agg(['min', 'max'])
1365
+ min max
1366
+ A
1367
+ 1 1 2
1368
+ 2 3 4
1369
+
1370
+ User-defined function for aggregation
1371
+
1372
+ >>> df.groupby('A').agg(lambda x: sum(x) + 2)
1373
+ B C
1374
+ A
1375
+ 1 5 2.590715
1376
+ 2 9 2.704907
1377
+
1378
+ Different aggregations per column
1379
+
1380
+ >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
1381
+ B C
1382
+ min max sum
1383
+ A
1384
+ 1 1 2 0.590715
1385
+ 2 3 4 0.704907
1386
+
1387
+ To control the output names with different aggregations per column,
1388
+ pandas supports "named aggregation"
1389
+
1390
+ >>> df.groupby("A").agg(
1391
+ ... b_min=pd.NamedAgg(column="B", aggfunc="min"),
1392
+ ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")
1393
+ ... )
1394
+ b_min c_sum
1395
+ A
1396
+ 1 1 0.590715
1397
+ 2 3 0.704907
1398
+
1399
+ - The keywords are the *output* column names
1400
+ - The values are tuples whose first element is the column to select
1401
+ and the second element is the aggregation to apply to that column.
1402
+ Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
1403
+ ``['column', 'aggfunc']`` to make it clearer what the arguments are.
1404
+ As usual, the aggregation can be a callable or a string alias.
1405
+
1406
+ See :ref:`groupby.aggregate.named` for more.
1407
+
1408
+ .. versionchanged:: 1.3.0
1409
+
1410
+ The resulting dtype will reflect the return value of the aggregating function.
1411
+
1412
+ >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
1413
+ B
1414
+ A
1415
+ 1 1.0
1416
+ 2 3.0
1417
+ """
1418
+ )
1419
+
1420
    @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame")
    def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
        # Normalize named-aggregation kwargs (out=("col", "min"), ...) into
        # (func, output column labels, column order), and de-duplicate
        # identical lambdas so they get distinct result labels.
        relabeling, func, columns, order = reconstruct_func(func, **kwargs)
        func = maybe_mangle_lambdas(func)

        if maybe_use_numba(engine):
            # Not all agg functions support numba, only propagate numba kwargs
            # if user asks for numba
            kwargs["engine"] = engine
            kwargs["engine_kwargs"] = engine_kwargs

        # First attempt: the generic GroupByApply path; may return None,
        # meaning the fallback paths below must handle this func.
        op = GroupByApply(self, func, args=args, kwargs=kwargs)
        result = op.agg()
        if not is_dict_like(func) and result is not None:
            # GH #52849
            if not self.as_index and is_list_like(func):
                return result.reset_index()
            else:
                return result
        elif relabeling:
            # this should be the only (non-raising) case with relabeling
            # used reordered index of columns
            result = cast(DataFrame, result)
            result = result.iloc[:, order]
            result = cast(DataFrame, result)
            # error: Incompatible types in assignment (expression has type
            # "Optional[List[str]]", variable has type
            # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]],
            # Index, Series], Sequence[Any]]")
            result.columns = columns  # type: ignore[assignment]

        if result is None:
            # Remove the kwargs we inserted
            # (already stored in engine, engine_kwargs arguments)
            if "engine" in kwargs:
                del kwargs["engine"]
                del kwargs["engine_kwargs"]
            # at this point func is not a str, list-like, dict-like,
            # or a known callable(e.g. sum)
            if maybe_use_numba(engine):
                return self._aggregate_with_numba(
                    func, *args, engine_kwargs=engine_kwargs, **kwargs
                )
            # grouper specific aggregations
            if self._grouper.nkeys > 1:
                # test_groupby_as_index_series_scalar gets here with 'not self.as_index'
                return self._python_agg_general(func, *args, **kwargs)
            elif args or kwargs:
                # test_pass_args_kwargs gets here (with and without as_index)
                # can't return early
                result = self._aggregate_frame(func, *args, **kwargs)

            elif self.axis == 1:
                # _aggregate_multiple_funcs does not allow self.axis == 1
                # Note: axis == 1 precludes 'not self.as_index', see __init__
                result = self._aggregate_frame(func)
                return result

            else:
                # try to treat as if we are passing a list
                gba = GroupByApply(self, [func], args=(), kwargs={})
                try:
                    result = gba.agg()

                except ValueError as err:
                    if "No objects to concatenate" not in str(err):
                        raise
                    # _aggregate_frame can fail with e.g. func=Series.mode,
                    # where it expects 1D values but would be getting 2D values
                    # In other tests, using aggregate_frame instead of GroupByApply
                    # would give correct values but incorrect dtypes
                    #  object vs float64 in test_cython_agg_empty_buckets
                    #  float64 vs int64 in test_category_order_apply
                    result = self._aggregate_frame(func)

                else:
                    # GH#32040, GH#35246
                    # e.g. test_groupby_as_index_select_column_sum_empty_df
                    result = cast(DataFrame, result)
                    result.columns = self._obj_with_exclusions.columns.copy()

        if not self.as_index:
            # Materialize the group keys as regular columns with a default
            # RangeIndex instead of keeping them in the index.
            result = self._insert_inaxis_grouper(result)
            result.index = default_index(len(result))

        return result

    agg = aggregate
1508
+
1509
    def _python_agg_general(self, func, *args, **kwargs):
        # Aggregate with a Python-level callable, column by column.
        # Map builtin callables (sum, min, ...) to their pandas equivalents
        # and warn about the replacement.
        orig_func = func
        func = com.is_builtin_func(func)
        if orig_func != func:
            alias = com._builtin_table_alias[func]
            warn_alias_replacement(self, orig_func, alias)
        f = lambda x: func(x, *args, **kwargs)

        if self.ngroups == 0:
            # e.g. test_evaluate_with_empty_groups different path gets different
            # result dtype in empty case.
            return self._python_apply_general(f, self._selected_obj, is_agg=True)

        obj = self._obj_with_exclusions
        if self.axis == 1:
            obj = obj.T

        if not len(obj.columns):
            # e.g. test_margins_no_values_no_cols
            return self._python_apply_general(f, self._selected_obj)

        # Aggregate each column with positional keys (robust to duplicate
        # column labels), then restore the original labels afterwards.
        output: dict[int, ArrayLike] = {}
        for idx, (name, ser) in enumerate(obj.items()):
            result = self._grouper.agg_series(ser, f)
            output[idx] = result

        res = self.obj._constructor(output)
        res.columns = obj.columns.copy(deep=False)
        return self._wrap_aggregated_output(res)
1538
+
1539
+ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
1540
+ if self._grouper.nkeys != 1:
1541
+ raise AssertionError("Number of keys must be 1")
1542
+
1543
+ obj = self._obj_with_exclusions
1544
+
1545
+ result: dict[Hashable, NDFrame | np.ndarray] = {}
1546
+ for name, grp_df in self._grouper.get_iterator(obj, self.axis):
1547
+ fres = func(grp_df, *args, **kwargs)
1548
+ result[name] = fres
1549
+
1550
+ result_index = self._grouper.result_index
1551
+ other_ax = obj.axes[1 - self.axis]
1552
+ out = self.obj._constructor(result, index=other_ax, columns=result_index)
1553
+ if self.axis == 0:
1554
+ out = out.T
1555
+
1556
+ return out
1557
+
1558
    def _wrap_applied_output(
        self,
        data: DataFrame,
        values: list,
        not_indexed_same: bool = False,
        is_transform: bool = False,
    ):
        # Reassemble the per-group results produced by apply into a
        # DataFrame/Series, dispatching on the type of the first non-None
        # per-group result.
        if len(values) == 0:
            if is_transform:
                # GH#47787 see test_group_on_empty_multiindex
                res_index = data.index
            else:
                res_index = self._grouper.result_index

            result = self.obj._constructor(index=res_index, columns=data.columns)
            result = result.astype(data.dtypes, copy=False)
            return result

        # GH12824
        # using values[0] here breaks test_groupby_apply_none_first
        first_not_none = next(com.not_none(*values), None)

        if first_not_none is None:
            # GH9684 - All values are None, return an empty frame.
            return self.obj._constructor()
        elif isinstance(first_not_none, DataFrame):
            return self._concat_objects(
                values,
                not_indexed_same=not_indexed_same,
                is_transform=is_transform,
            )

        key_index = self._grouper.result_index if self.as_index else None

        if isinstance(first_not_none, (np.ndarray, Index)):
            # GH#1738: values is list of arrays of unequal lengths
            #  fall through to the outer else clause
            # TODO: sure this is right?  we used to do this
            #  after raising AttributeError above
            # GH 18930
            if not is_hashable(self._selection):
                # error: Need type annotation for "name"
                name = tuple(self._selection)  # type: ignore[var-annotated, arg-type]
            else:
                # error: Incompatible types in assignment
                #  (expression has type "Hashable", variable
                #  has type "Tuple[Any, ...]")
                name = self._selection  # type: ignore[assignment]
            return self.obj._constructor_sliced(values, index=key_index, name=name)
        elif not isinstance(first_not_none, Series):
            # values are not series or array-like but scalars
            # self._selection not passed through to Series as the
            # result should not take the name of original selection
            # of columns
            if self.as_index:
                return self.obj._constructor_sliced(values, index=key_index)
            else:
                result = self.obj._constructor(values, columns=[self._selection])
                result = self._insert_inaxis_grouper(result)
                return result
        else:
            # values are Series
            return self._wrap_applied_output_series(
                values,
                not_indexed_same,
                first_not_none,
                key_index,
                is_transform,
            )
1627
+
1628
    def _wrap_applied_output_series(
        self,
        values: list[Series],
        not_indexed_same: bool,
        first_not_none,
        key_index: Index | None,
        is_transform: bool,
    ) -> DataFrame | Series:
        # Replace None group results with an empty Series carrying the same
        # axes as the first real result, so the vstack below stays rectangular.
        kwargs = first_not_none._construct_axes_dict()
        backup = Series(**kwargs)
        values = [x if (x is not None) else backup for x in values]

        all_indexed_same = all_indexes_same(x.index for x in values)

        if not all_indexed_same:
            # GH 8467
            return self._concat_objects(
                values,
                not_indexed_same=True,
                is_transform=is_transform,
            )

        # Combine values
        # vstack+constructor is faster than concat and handles MI-columns
        stacked_values = np.vstack([np.asarray(v) for v in values])

        if self.axis == 0:
            index = key_index
            columns = first_not_none.index.copy()
            if columns.name is None:
                # GH6124 - propagate name of Series when it's consistent
                names = {v.name for v in values}
                if len(names) == 1:
                    columns.name = next(iter(names))
        else:
            index = first_not_none.index
            columns = key_index
            stacked_values = stacked_values.T

        if stacked_values.dtype == object:
            # We'll have the DataFrame constructor do inference
            stacked_values = stacked_values.tolist()
        result = self.obj._constructor(stacked_values, index=index, columns=columns)

        if not self.as_index:
            result = self._insert_inaxis_grouper(result)

        return self._reindex_output(result)
1676
+
1677
    def _cython_transform(
        self,
        how: str,
        numeric_only: bool = False,
        axis: AxisInt = 0,
        **kwargs,
    ) -> DataFrame:
        # Run the named cython "transform" kernel blockwise over the data.
        assert axis == 0  # handled by caller

        # With self.axis == 0, we have multi-block tests
        #  e.g. test_rank_min_int, test_cython_transform_frame
        #  test_transform_numeric_ret
        # With self.axis == 1, _get_data_to_aggregate does a transpose
        #  so we always have a single block.
        mgr: Manager2D = self._get_data_to_aggregate(
            numeric_only=numeric_only, name=how
        )

        def arr_func(bvalues: ArrayLike) -> ArrayLike:
            # Dispatch one block of values through the cython machinery.
            return self._grouper._cython_operation(
                "transform", bvalues, how, 1, **kwargs
            )

        # We could use `mgr.apply` here and not have to set_axis, but
        #  we would have to do shape gymnastics for ArrayManager compat
        res_mgr = mgr.grouped_reduce(arr_func)
        res_mgr.set_axis(1, mgr.axes[1])

        res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes)
        # Undo the transpose performed for self.axis == 1, if any.
        res_df = self._maybe_transpose_result(res_df)
        return res_df
1708
+
1709
    def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs):
        # Apply ``func`` group by group, then concatenate the pieces back
        # in the original row order.
        if maybe_use_numba(engine):
            return self._transform_with_numba(
                func, *args, engine_kwargs=engine_kwargs, **kwargs
            )
        from pandas.core.reshape.concat import concat

        applied = []
        obj = self._obj_with_exclusions
        gen = self._grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)

        # Determine whether to use slow or fast path by evaluating on the first group.
        # Need to handle the case of an empty generator and process the result so that
        # it does not need to be computed again.
        try:
            name, group = next(gen)
        except StopIteration:
            pass
        else:
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            try:
                path, res = self._choose_path(fast_path, slow_path, group)
            except ValueError as err:
                # e.g. test_transform_with_non_scalar_group
                msg = "transform must return a scalar value for each group"
                raise ValueError(msg) from err
            if group.size > 0:
                res = _wrap_transform_general_frame(self.obj, group, res)
                applied.append(res)

        # Compute and process with the remaining groups
        for name, group in gen:
            if group.size == 0:
                continue
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            res = path(group)

            res = _wrap_transform_general_frame(self.obj, group, res)
            applied.append(res)

        # Reindex along the non-concatenation axis so column/row order
        # matches the original object, then restore the original row order.
        concat_index = obj.columns if self.axis == 0 else obj.index
        other_axis = 1 if self.axis == 0 else 0  # switches between 0 & 1
        concatenated = concat(applied, axis=self.axis, verify_integrity=False)
        concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
        return self._set_result_index_ordered(concatenated)
1757
+
1758
    # Examples section substituted into the ``transform`` docstring via the
    # @Substitution decorator below.
    __examples_dataframe_doc = dedent(
        """
    >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
    ...                           'foo', 'bar'],
    ...                    'B' : ['one', 'one', 'two', 'three',
    ...                           'two', 'two'],
    ...                    'C' : [1, 5, 5, 2, 5, 5],
    ...                    'D' : [2.0, 5., 8., 1., 2., 9.]})
    >>> grouped = df.groupby('A')[['C', 'D']]
    >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
              C         D
    0 -1.154701 -0.577350
    1  0.577350  0.000000
    2  0.577350  1.154701
    3 -1.154701 -1.000000
    4  0.577350 -0.577350
    5  0.577350  1.000000

    Broadcast result of the transformation

    >>> grouped.transform(lambda x: x.max() - x.min())
         C    D
    0  4.0  6.0
    1  3.0  8.0
    2  4.0  6.0
    3  3.0  8.0
    4  4.0  6.0
    5  3.0  8.0

    >>> grouped.transform("mean")
              C    D
    0  3.666667  4.0
    1  4.000000  5.0
    2  3.666667  4.0
    3  4.000000  5.0
    4  3.666667  4.0
    5  4.000000  5.0

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the passed ``func``,
        for example:

    >>> grouped.transform(lambda x: x.astype(int).max())
       C  D
    0  5  8
    1  5  9
    2  5  8
    3  5  9
    4  5  8
    5  5  9
    """
    )
1811
+
1812
    @Substitution(klass="DataFrame", example=__examples_dataframe_doc)
    @Appender(_transform_template)
    def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
        # Shared GroupBy._transform does the work; the docstring comes from
        # the template decorators above.
        return self._transform(
            func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
        )
1818
+
1819
+ def _define_paths(self, func, *args, **kwargs):
1820
+ if isinstance(func, str):
1821
+ fast_path = lambda group: getattr(group, func)(*args, **kwargs)
1822
+ slow_path = lambda group: group.apply(
1823
+ lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
1824
+ )
1825
+ else:
1826
+ fast_path = lambda group: func(group, *args, **kwargs)
1827
+ slow_path = lambda group: group.apply(
1828
+ lambda x: func(x, *args, **kwargs), axis=self.axis
1829
+ )
1830
+ return fast_path, slow_path
1831
+
1832
    def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
        # Evaluate the slow path first; promote to the fast path only when it
        # runs without error and produces an identical, correctly-shaped result.
        path = slow_path
        res = slow_path(group)

        if self.ngroups == 1:
            # no need to evaluate multiple paths when only
            # a single group exists
            return path, res

        # if we make it here, test if we can use the fast path
        try:
            res_fast = fast_path(group)
        except AssertionError:
            raise  # pragma: no cover
        except Exception:
            # GH#29631 For user-defined function, we can't predict what may be
            #  raised; see test_transform.test_transform_fastpath_raises
            return path, res

        # verify fast path returns either:
        # a DataFrame with columns equal to group.columns
        # OR a Series with index equal to group.columns
        if isinstance(res_fast, DataFrame):
            if not res_fast.columns.equals(group.columns):
                return path, res
        elif isinstance(res_fast, Series):
            if not res_fast.index.equals(group.columns):
                return path, res
        else:
            return path, res

        if res_fast.equals(res):
            path = fast_path

        return path, res
1867
+
1868
    def filter(self, func, dropna: bool = True, *args, **kwargs):
        """
        Filter elements from groups that don't satisfy a criterion.

        Elements from groups are filtered if they do not satisfy the
        boolean criterion specified by func.

        Parameters
        ----------
        func : function
            Criterion to apply to each group. Should return True or False.
        dropna : bool
            Drop groups that do not pass the filter. True by default; if False,
            groups that evaluate False are filled with NaNs.

        Returns
        -------
        DataFrame

        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.

        Functions that mutate the passed object can produce unexpected
        behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
        for more details.

        Examples
        --------
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> grouped.filter(lambda x: x['B'].mean() > 3.)
             A  B    C
        1  bar  2  5.0
        3  bar  4  1.0
        5  bar  6  9.0
        """
        indices = []

        obj = self._selected_obj
        gen = self._grouper.get_iterator(obj, axis=self.axis)

        for name, group in gen:
            # 2023-02-27 no tests are broken this pinning, but it is documented in the
            #  docstring above.
            object.__setattr__(group, "name", name)

            res = func(group, *args, **kwargs)

            try:
                # reduce a 1-element result to a scalar, if possible
                res = res.squeeze()
            except AttributeError:  # allow e.g., scalars and frames to pass
                pass

            # interpret the result of the filter
            if is_bool(res) or (is_scalar(res) and isna(res)):
                if notna(res) and res:
                    indices.append(self._get_index(name))
            else:
                # non scalars aren't allowed
                raise TypeError(
                    f"filter function returned a {type(res).__name__}, "
                    "but expected a scalar bool"
                )

        return self._apply_filter(indices, dropna)
1938
+
1939
    def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
        # Column selection on the groupby, e.g. gb["a"] / gb[["a", "b"]].
        if self.axis == 1:
            # GH 37725
            raise ValueError("Cannot subset columns when using axis=1")
        # per GH 23566
        if isinstance(key, tuple) and len(key) > 1:
            # if len == 1, then it becomes a SeriesGroupBy and this is actually
            # valid syntax, so don't raise
            raise ValueError(
                "Cannot subset columns with a tuple with more than one element. "
                "Use a list instead."
            )
        return super().__getitem__(key)
1952
+
1953
    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # Rebuild a groupby of the requested dimensionality, carrying every
        # grouping option over so the selection behaves like the original.
        if ndim == 2:
            if subset is None:
                subset = self.obj
            return DataFrameGroupBy(
                subset,
                self.keys,
                axis=self.axis,
                level=self.level,
                grouper=self._grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                observed=self.observed,
                dropna=self.dropna,
            )
        elif ndim == 1:
            if subset is None:
                subset = self.obj[key]
            return SeriesGroupBy(
                subset,
                self.keys,
                level=self.level,
                grouper=self._grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                observed=self.observed,
                dropna=self.dropna,
            )

        raise AssertionError("invalid ndim for _gotitem")
2001
+
2002
+ def _get_data_to_aggregate(
2003
+ self, *, numeric_only: bool = False, name: str | None = None
2004
+ ) -> Manager2D:
2005
+ obj = self._obj_with_exclusions
2006
+ if self.axis == 1:
2007
+ mgr = obj.T._mgr
2008
+ else:
2009
+ mgr = obj._mgr
2010
+
2011
+ if numeric_only:
2012
+ mgr = mgr.get_numeric_data()
2013
+ return mgr
2014
+
2015
    def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
        # Build a DataFrame directly from the aggregated block manager.
        return self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
2017
+
2018
    def _apply_to_column_groupbys(self, func) -> DataFrame:
        # Apply ``func`` to a SeriesGroupBy built for each column, then glue
        # the per-column results back together along axis=1.
        from pandas.core.reshape.concat import concat

        obj = self._obj_with_exclusions
        columns = obj.columns
        sgbs = [
            SeriesGroupBy(
                obj.iloc[:, i],
                selection=colname,
                grouper=self._grouper,
                exclusions=self.exclusions,
                observed=self.observed,
            )
            for i, colname in enumerate(obj.columns)
        ]
        results = [func(sgb) for sgb in sgbs]

        if not len(results):
            # concat would raise
            res_df = DataFrame([], columns=columns, index=self._grouper.result_index)
        else:
            res_df = concat(results, keys=columns, axis=1)

        if not self.as_index:
            res_df.index = default_index(len(res_df))
            res_df = self._insert_inaxis_grouper(res_df)
        return res_df
2045
+
2046
    def nunique(self, dropna: bool = True) -> DataFrame:
        """
        Return DataFrame with counts of unique elements in each position.

        Parameters
        ----------
        dropna : bool, default True
            Don't include NaN in the counts.

        Returns
        -------
        nunique: DataFrame

        Examples
        --------
        >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
        ...                           'ham', 'ham'],
        ...                    'value1': [1, 5, 5, 2, 5, 5],
        ...                    'value2': list('abbaxy')})
        >>> df
             id  value1 value2
        0  spam       1      a
        1   egg       5      b
        2   egg       5      b
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y

        >>> df.groupby('id').nunique()
              value1  value2
        id
        egg        1       1
        ham        1       2
        spam       2       1

        Check for rows with the same id but conflicting values:

        >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
             id  value1 value2
        0  spam       1      a
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        """

        if self.axis != 0:
            # see test_groupby_crash_on_nunique
            return self._python_apply_general(
                lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True
            )

        # Normal case: compute per column via SeriesGroupBy.nunique.
        return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna))
2098
+
2099
    def idxmax(
        self,
        axis: Axis | None | lib.NoDefault = lib.no_default,
        skipna: bool = True,
        numeric_only: bool = False,
    ) -> DataFrame:
        """
        Return index of first occurrence of maximum over requested axis.

        NA/null values are excluded.

        Parameters
        ----------
        axis : {{0 or 'index', 1 or 'columns'}}, default None
            The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
            If axis is not provided, grouper's axis is used.

            .. versionchanged:: 2.0.0

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        skipna : bool, default True
            Exclude NA/null values. If an entire row/column is NA, the result
            will be NA.
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

        Returns
        -------
        Series
            Indexes of maxima along the specified axis.

        Raises
        ------
        ValueError
            * If the row/column is empty

        See Also
        --------
        Series.idxmax : Return index of the maximum element.

        Notes
        -----
        This method is the DataFrame version of ``ndarray.argmax``.

        Examples
        --------
        Consider a dataset containing food consumption in Argentina.

        >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
        ...                    'co2_emissions': [37.2, 19.66, 1712]},
        ...                   index=['Pork', 'Wheat Products', 'Beef'])

        >>> df
                        consumption  co2_emissions
        Pork                  10.51         37.20
        Wheat Products       103.11         19.66
        Beef                  55.48       1712.00

        By default, it returns the index for the maximum value in each column.

        >>> df.idxmax()
        consumption     Wheat Products
        co2_emissions             Beef
        dtype: object

        To return the index for the maximum value in each row, use ``axis="columns"``.

        >>> df.idxmax(axis="columns")
        Pork              co2_emissions
        Wheat Products      consumption
        Beef              co2_emissions
        dtype: object
        """
        # Shared helper implements both idxmin and idxmax.
        return self._idxmax_idxmin(
            "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna
        )
2180
+
2181
    def idxmin(
        self,
        axis: Axis | None | lib.NoDefault = lib.no_default,
        skipna: bool = True,
        numeric_only: bool = False,
    ) -> DataFrame:
        """
        Return index of first occurrence of minimum over requested axis.

        NA/null values are excluded.

        Parameters
        ----------
        axis : {{0 or 'index', 1 or 'columns'}}, default None
            The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
            If axis is not provided, grouper's axis is used.

            .. versionchanged:: 2.0.0

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        skipna : bool, default True
            Exclude NA/null values. If an entire row/column is NA, the result
            will be NA.
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

        Returns
        -------
        Series
            Indexes of minima along the specified axis.

        Raises
        ------
        ValueError
            * If the row/column is empty

        See Also
        --------
        Series.idxmin : Return index of the minimum element.

        Notes
        -----
        This method is the DataFrame version of ``ndarray.argmin``.

        Examples
        --------
        Consider a dataset containing food consumption in Argentina.

        >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
        ...                    'co2_emissions': [37.2, 19.66, 1712]},
        ...                   index=['Pork', 'Wheat Products', 'Beef'])

        >>> df
                        consumption  co2_emissions
        Pork                  10.51         37.20
        Wheat Products       103.11         19.66
        Beef                  55.48       1712.00

        By default, it returns the index for the minimum value in each column.

        >>> df.idxmin()
        consumption                Pork
        co2_emissions    Wheat Products
        dtype: object

        To return the index for the minimum value in each row, use ``axis="columns"``.

        >>> df.idxmin(axis="columns")
        Pork                consumption
        Wheat Products    co2_emissions
        Beef                consumption
        dtype: object
        """
        # Shared helper implements both idxmin and idxmax.
        return self._idxmax_idxmin(
            "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna
        )
2262
+
2263
+ boxplot = boxplot_frame_groupby
2264
+
2265
+ def value_counts(
2266
+ self,
2267
+ subset: Sequence[Hashable] | None = None,
2268
+ normalize: bool = False,
2269
+ sort: bool = True,
2270
+ ascending: bool = False,
2271
+ dropna: bool = True,
2272
+ ) -> DataFrame | Series:
2273
+ """
2274
+ Return a Series or DataFrame containing counts of unique rows.
2275
+
2276
+ .. versionadded:: 1.4.0
2277
+
2278
+ Parameters
2279
+ ----------
2280
+ subset : list-like, optional
2281
+ Columns to use when counting unique combinations.
2282
+ normalize : bool, default False
2283
+ Return proportions rather than frequencies.
2284
+ sort : bool, default True
2285
+ Sort by frequencies.
2286
+ ascending : bool, default False
2287
+ Sort in ascending order.
2288
+ dropna : bool, default True
2289
+ Don't include counts of rows that contain NA values.
2290
+
2291
+ Returns
2292
+ -------
2293
+ Series or DataFrame
2294
+ Series if the groupby as_index is True, otherwise DataFrame.
2295
+
2296
+ See Also
2297
+ --------
2298
+ Series.value_counts: Equivalent method on Series.
2299
+ DataFrame.value_counts: Equivalent method on DataFrame.
2300
+ SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
2301
+
2302
+ Notes
2303
+ -----
2304
+ - If the groupby as_index is True then the returned Series will have a
2305
+ MultiIndex with one level per input column.
2306
+ - If the groupby as_index is False then the returned DataFrame will have an
2307
+ additional column with the value_counts. The column is labelled 'count' or
2308
+ 'proportion', depending on the ``normalize`` parameter.
2309
+
2310
+ By default, rows that contain any NA values are omitted from
2311
+ the result.
2312
+
2313
+ By default, the result will be in descending order so that the
2314
+ first element of each group is the most frequently-occurring row.
2315
+
2316
+ Examples
2317
+ --------
2318
+ >>> df = pd.DataFrame({
2319
+ ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
2320
+ ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
2321
+ ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
2322
+ ... })
2323
+
2324
+ >>> df
2325
+ gender education country
2326
+ 0 male low US
2327
+ 1 male medium FR
2328
+ 2 female high US
2329
+ 3 male low FR
2330
+ 4 female high FR
2331
+ 5 male low FR
2332
+
2333
+ >>> df.groupby('gender').value_counts()
2334
+ gender education country
2335
+ female high FR 1
2336
+ US 1
2337
+ male low FR 2
2338
+ US 1
2339
+ medium FR 1
2340
+ Name: count, dtype: int64
2341
+
2342
+ >>> df.groupby('gender').value_counts(ascending=True)
2343
+ gender education country
2344
+ female high FR 1
2345
+ US 1
2346
+ male low US 1
2347
+ medium FR 1
2348
+ low FR 2
2349
+ Name: count, dtype: int64
2350
+
2351
+ >>> df.groupby('gender').value_counts(normalize=True)
2352
+ gender education country
2353
+ female high FR 0.50
2354
+ US 0.50
2355
+ male low FR 0.50
2356
+ US 0.25
2357
+ medium FR 0.25
2358
+ Name: proportion, dtype: float64
2359
+
2360
+ >>> df.groupby('gender', as_index=False).value_counts()
2361
+ gender education country count
2362
+ 0 female high FR 1
2363
+ 1 female high US 1
2364
+ 2 male low FR 2
2365
+ 3 male low US 1
2366
+ 4 male medium FR 1
2367
+
2368
+ >>> df.groupby('gender', as_index=False).value_counts(normalize=True)
2369
+ gender education country proportion
2370
+ 0 female high FR 0.50
2371
+ 1 female high US 0.50
2372
+ 2 male low FR 0.50
2373
+ 3 male low US 0.25
2374
+ 4 male medium FR 0.25
2375
+ """
2376
+ return self._value_counts(subset, normalize, sort, ascending, dropna)
2377
+
2378
+ def fillna(
2379
+ self,
2380
+ value: Hashable | Mapping | Series | DataFrame | None = None,
2381
+ method: FillnaOptions | None = None,
2382
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2383
+ inplace: bool = False,
2384
+ limit: int | None = None,
2385
+ downcast=lib.no_default,
2386
+ ) -> DataFrame | None:
2387
+ """
2388
+ Fill NA/NaN values using the specified method within groups.
2389
+
2390
+ .. deprecated:: 2.2.0
2391
+ This method is deprecated and will be removed in a future version.
2392
+ Use the :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill`
2393
+ for forward or backward filling instead. If you want to fill with a
2394
+ single value, use :meth:`DataFrame.fillna` instead.
2395
+
2396
+ Parameters
2397
+ ----------
2398
+ value : scalar, dict, Series, or DataFrame
2399
+ Value to use to fill holes (e.g. 0), alternately a
2400
+ dict/Series/DataFrame of values specifying which value to use for
2401
+ each index (for a Series) or column (for a DataFrame). Values not
2402
+ in the dict/Series/DataFrame will not be filled. This value cannot
2403
+ be a list. Users wanting to use the ``value`` argument and not ``method``
2404
+ should prefer :meth:`.DataFrame.fillna` as this
2405
+ will produce the same result and be more performant.
2406
+ method : {{'bfill', 'ffill', None}}, default None
2407
+ Method to use for filling holes. ``'ffill'`` will propagate
2408
+ the last valid observation forward within a group.
2409
+ ``'bfill'`` will use next valid observation to fill the gap.
2410
+ axis : {0 or 'index', 1 or 'columns'}
2411
+ Axis along which to fill missing values. When the :class:`DataFrameGroupBy`
2412
+ ``axis`` argument is ``0``, using ``axis=1`` here will produce
2413
+ the same results as :meth:`.DataFrame.fillna`. When the
2414
+ :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0``
2415
+ or ``axis=1`` here will produce the same results.
2416
+ inplace : bool, default False
2417
+ Broken. Do not set to True.
2418
+ limit : int, default None
2419
+ If method is specified, this is the maximum number of consecutive
2420
+ NaN values to forward/backward fill within a group. In other words,
2421
+ if there is a gap with more than this number of consecutive NaNs,
2422
+ it will only be partially filled. If method is not specified, this is the
2423
+ maximum number of entries along the entire axis where NaNs will be
2424
+ filled. Must be greater than 0 if not None.
2425
+ downcast : dict, default is None
2426
+ A dict of item->dtype of what to downcast if possible,
2427
+ or the string 'infer' which will try to downcast to an appropriate
2428
+ equal type (e.g. float64 to int64 if possible).
2429
+
2430
+ Returns
2431
+ -------
2432
+ DataFrame
2433
+ Object with missing values filled.
2434
+
2435
+ See Also
2436
+ --------
2437
+ ffill : Forward fill values within a group.
2438
+ bfill : Backward fill values within a group.
2439
+
2440
+ Examples
2441
+ --------
2442
+ >>> df = pd.DataFrame(
2443
+ ... {
2444
+ ... "key": [0, 0, 1, 1, 1],
2445
+ ... "A": [np.nan, 2, np.nan, 3, np.nan],
2446
+ ... "B": [2, 3, np.nan, np.nan, np.nan],
2447
+ ... "C": [np.nan, np.nan, 2, np.nan, np.nan],
2448
+ ... }
2449
+ ... )
2450
+ >>> df
2451
+ key A B C
2452
+ 0 0 NaN 2.0 NaN
2453
+ 1 0 2.0 3.0 NaN
2454
+ 2 1 NaN NaN 2.0
2455
+ 3 1 3.0 NaN NaN
2456
+ 4 1 NaN NaN NaN
2457
+
2458
+ Propagate non-null values forward or backward within each group along columns.
2459
+
2460
+ >>> df.groupby("key").fillna(method="ffill")
2461
+ A B C
2462
+ 0 NaN 2.0 NaN
2463
+ 1 2.0 3.0 NaN
2464
+ 2 NaN NaN 2.0
2465
+ 3 3.0 NaN 2.0
2466
+ 4 3.0 NaN 2.0
2467
+
2468
+ >>> df.groupby("key").fillna(method="bfill")
2469
+ A B C
2470
+ 0 2.0 2.0 NaN
2471
+ 1 2.0 3.0 NaN
2472
+ 2 3.0 NaN 2.0
2473
+ 3 3.0 NaN NaN
2474
+ 4 NaN NaN NaN
2475
+
2476
+ Propagate non-null values forward or backward within each group along rows.
2477
+
2478
+ >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T
2479
+ key A B C
2480
+ 0 0.0 0.0 2.0 2.0
2481
+ 1 0.0 2.0 3.0 3.0
2482
+ 2 1.0 1.0 NaN 2.0
2483
+ 3 1.0 3.0 NaN NaN
2484
+ 4 1.0 1.0 NaN NaN
2485
+
2486
+ >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T
2487
+ key A B C
2488
+ 0 0.0 NaN 2.0 NaN
2489
+ 1 0.0 2.0 3.0 NaN
2490
+ 2 1.0 NaN 2.0 2.0
2491
+ 3 1.0 3.0 NaN NaN
2492
+ 4 1.0 NaN NaN NaN
2493
+
2494
+ Only replace the first NaN element within a group along rows.
2495
+
2496
+ >>> df.groupby("key").fillna(method="ffill", limit=1)
2497
+ A B C
2498
+ 0 NaN 2.0 NaN
2499
+ 1 2.0 3.0 NaN
2500
+ 2 NaN NaN 2.0
2501
+ 3 3.0 NaN 2.0
2502
+ 4 3.0 NaN NaN
2503
+ """
2504
+ warnings.warn(
2505
+ f"{type(self).__name__}.fillna is deprecated and "
2506
+ "will be removed in a future version. Use obj.ffill() or obj.bfill() "
2507
+ "for forward or backward filling instead. If you want to fill with a "
2508
+ f"single value, use {type(self.obj).__name__}.fillna instead",
2509
+ FutureWarning,
2510
+ stacklevel=find_stack_level(),
2511
+ )
2512
+
2513
+ result = self._op_via_apply(
2514
+ "fillna",
2515
+ value=value,
2516
+ method=method,
2517
+ axis=axis,
2518
+ inplace=inplace,
2519
+ limit=limit,
2520
+ downcast=downcast,
2521
+ )
2522
+ return result
2523
+
2524
+ def take(
2525
+ self,
2526
+ indices: TakeIndexer,
2527
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2528
+ **kwargs,
2529
+ ) -> DataFrame:
2530
+ """
2531
+ Return the elements in the given *positional* indices in each group.
2532
+
2533
+ This means that we are not indexing according to actual values in
2534
+ the index attribute of the object. We are indexing according to the
2535
+ actual position of the element in the object.
2536
+
2537
+ If a requested index does not exist for some group, this method will raise.
2538
+ To get similar behavior that ignores indices that don't exist, see
2539
+ :meth:`.DataFrameGroupBy.nth`.
2540
+
2541
+ Parameters
2542
+ ----------
2543
+ indices : array-like
2544
+ An array of ints indicating which positions to take.
2545
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
2546
+ The axis on which to select elements. ``0`` means that we are
2547
+ selecting rows, ``1`` means that we are selecting columns.
2548
+
2549
+ .. deprecated:: 2.1.0
2550
+ For axis=1, operate on the underlying object instead. Otherwise
2551
+ the axis keyword is not necessary.
2552
+
2553
+ **kwargs
2554
+ For compatibility with :meth:`numpy.take`. Has no effect on the
2555
+ output.
2556
+
2557
+ Returns
2558
+ -------
2559
+ DataFrame
2560
+ An DataFrame containing the elements taken from each group.
2561
+
2562
+ See Also
2563
+ --------
2564
+ DataFrame.take : Take elements from a Series along an axis.
2565
+ DataFrame.loc : Select a subset of a DataFrame by labels.
2566
+ DataFrame.iloc : Select a subset of a DataFrame by positions.
2567
+ numpy.take : Take elements from an array along an axis.
2568
+
2569
+ Examples
2570
+ --------
2571
+ >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
2572
+ ... ('parrot', 'bird', 24.0),
2573
+ ... ('lion', 'mammal', 80.5),
2574
+ ... ('monkey', 'mammal', np.nan),
2575
+ ... ('rabbit', 'mammal', 15.0)],
2576
+ ... columns=['name', 'class', 'max_speed'],
2577
+ ... index=[4, 3, 2, 1, 0])
2578
+ >>> df
2579
+ name class max_speed
2580
+ 4 falcon bird 389.0
2581
+ 3 parrot bird 24.0
2582
+ 2 lion mammal 80.5
2583
+ 1 monkey mammal NaN
2584
+ 0 rabbit mammal 15.0
2585
+ >>> gb = df.groupby([1, 1, 2, 2, 2])
2586
+
2587
+ Take elements at positions 0 and 1 along the axis 0 (default).
2588
+
2589
+ Note how the indices selected in the result do not correspond to
2590
+ our input indices 0 and 1. That's because we are selecting the 0th
2591
+ and 1st rows, not rows whose indices equal 0 and 1.
2592
+
2593
+ >>> gb.take([0, 1])
2594
+ name class max_speed
2595
+ 1 4 falcon bird 389.0
2596
+ 3 parrot bird 24.0
2597
+ 2 2 lion mammal 80.5
2598
+ 1 monkey mammal NaN
2599
+
2600
+ The order of the specified indices influences the order in the result.
2601
+ Here, the order is swapped from the previous example.
2602
+
2603
+ >>> gb.take([1, 0])
2604
+ name class max_speed
2605
+ 1 3 parrot bird 24.0
2606
+ 4 falcon bird 389.0
2607
+ 2 1 monkey mammal NaN
2608
+ 2 lion mammal 80.5
2609
+
2610
+ Take elements at indices 1 and 2 along the axis 1 (column selection).
2611
+
2612
+ We may take elements using negative integers for positive indices,
2613
+ starting from the end of the object, just like with Python lists.
2614
+
2615
+ >>> gb.take([-1, -2])
2616
+ name class max_speed
2617
+ 1 3 parrot bird 24.0
2618
+ 4 falcon bird 389.0
2619
+ 2 0 rabbit mammal 15.0
2620
+ 1 monkey mammal NaN
2621
+ """
2622
+ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
2623
+ return result
2624
+
2625
+ def skew(
2626
+ self,
2627
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2628
+ skipna: bool = True,
2629
+ numeric_only: bool = False,
2630
+ **kwargs,
2631
+ ) -> DataFrame:
2632
+ """
2633
+ Return unbiased skew within groups.
2634
+
2635
+ Normalized by N-1.
2636
+
2637
+ Parameters
2638
+ ----------
2639
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
2640
+ Axis for the function to be applied on.
2641
+
2642
+ Specifying ``axis=None`` will apply the aggregation across both axes.
2643
+
2644
+ .. versionadded:: 2.0.0
2645
+
2646
+ .. deprecated:: 2.1.0
2647
+ For axis=1, operate on the underlying object instead. Otherwise
2648
+ the axis keyword is not necessary.
2649
+
2650
+ skipna : bool, default True
2651
+ Exclude NA/null values when computing the result.
2652
+
2653
+ numeric_only : bool, default False
2654
+ Include only float, int, boolean columns.
2655
+
2656
+ **kwargs
2657
+ Additional keyword arguments to be passed to the function.
2658
+
2659
+ Returns
2660
+ -------
2661
+ DataFrame
2662
+
2663
+ See Also
2664
+ --------
2665
+ DataFrame.skew : Return unbiased skew over requested axis.
2666
+
2667
+ Examples
2668
+ --------
2669
+ >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi',
2670
+ ... 'lion', 'monkey', 'rabbit'],
2671
+ ... ['bird', 'bird', 'bird', 'bird',
2672
+ ... 'mammal', 'mammal', 'mammal']]
2673
+ >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class'))
2674
+ >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan,
2675
+ ... 80.5, 21.5, 15.0]},
2676
+ ... index=index)
2677
+ >>> df
2678
+ max_speed
2679
+ name class
2680
+ falcon bird 389.0
2681
+ parrot bird 24.0
2682
+ cockatoo bird 70.0
2683
+ kiwi bird NaN
2684
+ lion mammal 80.5
2685
+ monkey mammal 21.5
2686
+ rabbit mammal 15.0
2687
+ >>> gb = df.groupby(["class"])
2688
+ >>> gb.skew()
2689
+ max_speed
2690
+ class
2691
+ bird 1.628296
2692
+ mammal 1.669046
2693
+ >>> gb.skew(skipna=False)
2694
+ max_speed
2695
+ class
2696
+ bird NaN
2697
+ mammal 1.669046
2698
+ """
2699
+ if axis is lib.no_default:
2700
+ axis = 0
2701
+
2702
+ if axis != 0:
2703
+ result = self._op_via_apply(
2704
+ "skew",
2705
+ axis=axis,
2706
+ skipna=skipna,
2707
+ numeric_only=numeric_only,
2708
+ **kwargs,
2709
+ )
2710
+ return result
2711
+
2712
+ def alt(obj):
2713
+ # This should not be reached since the cython path should raise
2714
+ # TypeError and not NotImplementedError.
2715
+ raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")
2716
+
2717
+ return self._cython_agg_general(
2718
+ "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
2719
+ )
2720
+
2721
+ @property
2722
+ @doc(DataFrame.plot.__doc__)
2723
+ def plot(self) -> GroupByPlot:
2724
+ result = GroupByPlot(self)
2725
+ return result
2726
+
2727
+ @doc(DataFrame.corr.__doc__)
2728
+ def corr(
2729
+ self,
2730
+ method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
2731
+ min_periods: int = 1,
2732
+ numeric_only: bool = False,
2733
+ ) -> DataFrame:
2734
+ result = self._op_via_apply(
2735
+ "corr", method=method, min_periods=min_periods, numeric_only=numeric_only
2736
+ )
2737
+ return result
2738
+
2739
+ @doc(DataFrame.cov.__doc__)
2740
+ def cov(
2741
+ self,
2742
+ min_periods: int | None = None,
2743
+ ddof: int | None = 1,
2744
+ numeric_only: bool = False,
2745
+ ) -> DataFrame:
2746
+ result = self._op_via_apply(
2747
+ "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only
2748
+ )
2749
+ return result
2750
+
2751
+ @doc(DataFrame.hist.__doc__)
2752
+ def hist(
2753
+ self,
2754
+ column: IndexLabel | None = None,
2755
+ by=None,
2756
+ grid: bool = True,
2757
+ xlabelsize: int | None = None,
2758
+ xrot: float | None = None,
2759
+ ylabelsize: int | None = None,
2760
+ yrot: float | None = None,
2761
+ ax=None,
2762
+ sharex: bool = False,
2763
+ sharey: bool = False,
2764
+ figsize: tuple[int, int] | None = None,
2765
+ layout: tuple[int, int] | None = None,
2766
+ bins: int | Sequence[int] = 10,
2767
+ backend: str | None = None,
2768
+ legend: bool = False,
2769
+ **kwargs,
2770
+ ):
2771
+ result = self._op_via_apply(
2772
+ "hist",
2773
+ column=column,
2774
+ by=by,
2775
+ grid=grid,
2776
+ xlabelsize=xlabelsize,
2777
+ xrot=xrot,
2778
+ ylabelsize=ylabelsize,
2779
+ yrot=yrot,
2780
+ ax=ax,
2781
+ sharex=sharex,
2782
+ sharey=sharey,
2783
+ figsize=figsize,
2784
+ layout=layout,
2785
+ bins=bins,
2786
+ backend=backend,
2787
+ legend=legend,
2788
+ **kwargs,
2789
+ )
2790
+ return result
2791
+
2792
+ @property
2793
+ @doc(DataFrame.dtypes.__doc__)
2794
+ def dtypes(self) -> Series:
2795
+ # GH#51045
2796
+ warnings.warn(
2797
+ f"{type(self).__name__}.dtypes is deprecated and will be removed in "
2798
+ "a future version. Check the dtypes on the base object instead",
2799
+ FutureWarning,
2800
+ stacklevel=find_stack_level(),
2801
+ )
2802
+
2803
+ # error: Incompatible return value type (got "DataFrame", expected "Series")
2804
+ return self._python_apply_general( # type: ignore[return-value]
2805
+ lambda df: df.dtypes, self._selected_obj
2806
+ )
2807
+
2808
+ @doc(DataFrame.corrwith.__doc__)
2809
+ def corrwith(
2810
+ self,
2811
+ other: DataFrame | Series,
2812
+ axis: Axis | lib.NoDefault = lib.no_default,
2813
+ drop: bool = False,
2814
+ method: CorrelationMethod = "pearson",
2815
+ numeric_only: bool = False,
2816
+ ) -> DataFrame:
2817
+ result = self._op_via_apply(
2818
+ "corrwith",
2819
+ other=other,
2820
+ axis=axis,
2821
+ drop=drop,
2822
+ method=method,
2823
+ numeric_only=numeric_only,
2824
+ )
2825
+ return result
2826
+
2827
+
2828
+ def _wrap_transform_general_frame(
2829
+ obj: DataFrame, group: DataFrame, res: DataFrame | Series
2830
+ ) -> DataFrame:
2831
+ from pandas import concat
2832
+
2833
+ if isinstance(res, Series):
2834
+ # we need to broadcast across the
2835
+ # other dimension; this will preserve dtypes
2836
+ # GH14457
2837
+ if res.index.is_(obj.index):
2838
+ res_frame = concat([res] * len(group.columns), axis=1)
2839
+ res_frame.columns = group.columns
2840
+ res_frame.index = group.index
2841
+ else:
2842
+ res_frame = obj._constructor(
2843
+ np.tile(res.values, (len(group.index), 1)),
2844
+ columns=group.columns,
2845
+ index=group.index,
2846
+ )
2847
+ assert isinstance(res_frame, DataFrame)
2848
+ return res_frame
2849
+ elif isinstance(res, DataFrame) and not res.index.is_(group.index):
2850
+ return res._align_frame(group)[0]
2851
+ else:
2852
+ return res
videollama2/lib/python3.10/site-packages/pandas/core/groupby/groupby.py ADDED
The diff for this file is too large to render. See raw diff
 
videollama2/lib/python3.10/site-packages/pandas/core/groupby/grouper.py ADDED
@@ -0,0 +1,1102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provide user facing operators for doing the split part of the
3
+ split-apply-combine paradigm.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ final,
10
+ )
11
+ import warnings
12
+
13
+ import numpy as np
14
+
15
+ from pandas._config import (
16
+ using_copy_on_write,
17
+ warn_copy_on_write,
18
+ )
19
+
20
+ from pandas._libs import lib
21
+ from pandas._libs.tslibs import OutOfBoundsDatetime
22
+ from pandas.errors import InvalidIndexError
23
+ from pandas.util._decorators import cache_readonly
24
+ from pandas.util._exceptions import find_stack_level
25
+
26
+ from pandas.core.dtypes.common import (
27
+ is_list_like,
28
+ is_scalar,
29
+ )
30
+ from pandas.core.dtypes.dtypes import CategoricalDtype
31
+
32
+ from pandas.core import algorithms
33
+ from pandas.core.arrays import (
34
+ Categorical,
35
+ ExtensionArray,
36
+ )
37
+ import pandas.core.common as com
38
+ from pandas.core.frame import DataFrame
39
+ from pandas.core.groupby import ops
40
+ from pandas.core.groupby.categorical import recode_for_groupby
41
+ from pandas.core.indexes.api import (
42
+ CategoricalIndex,
43
+ Index,
44
+ MultiIndex,
45
+ )
46
+ from pandas.core.series import Series
47
+
48
+ from pandas.io.formats.printing import pprint_thing
49
+
50
+ if TYPE_CHECKING:
51
+ from collections.abc import (
52
+ Hashable,
53
+ Iterator,
54
+ )
55
+
56
+ from pandas._typing import (
57
+ ArrayLike,
58
+ Axis,
59
+ NDFrameT,
60
+ npt,
61
+ )
62
+
63
+ from pandas.core.generic import NDFrame
64
+
65
+
66
+ class Grouper:
67
+ """
68
+ A Grouper allows the user to specify a groupby instruction for an object.
69
+
70
+ This specification will select a column via the key parameter, or if the
71
+ level and/or axis parameters are given, a level of the index of the target
72
+ object.
73
+
74
+ If `axis` and/or `level` are passed as keywords to both `Grouper` and
75
+ `groupby`, the values passed to `Grouper` take precedence.
76
+
77
+ Parameters
78
+ ----------
79
+ key : str, defaults to None
80
+ Groupby key, which selects the grouping column of the target.
81
+ level : name/number, defaults to None
82
+ The level for the target index.
83
+ freq : str / frequency object, defaults to None
84
+ This will groupby the specified frequency if the target selection
85
+ (via key or level) is a datetime-like object. For full specification
86
+ of available frequencies, please see `here
87
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
88
+ axis : str, int, defaults to 0
89
+ Number/name of the axis.
90
+ sort : bool, default to False
91
+ Whether to sort the resulting labels.
92
+ closed : {'left' or 'right'}
93
+ Closed end of interval. Only when `freq` parameter is passed.
94
+ label : {'left' or 'right'}
95
+ Interval boundary to use for labeling.
96
+ Only when `freq` parameter is passed.
97
+ convention : {'start', 'end', 'e', 's'}
98
+ If grouper is PeriodIndex and `freq` parameter is passed.
99
+
100
+ origin : Timestamp or str, default 'start_day'
101
+ The timestamp on which to adjust the grouping. The timezone of origin must
102
+ match the timezone of the index.
103
+ If string, must be one of the following:
104
+
105
+ - 'epoch': `origin` is 1970-01-01
106
+ - 'start': `origin` is the first value of the timeseries
107
+ - 'start_day': `origin` is the first day at midnight of the timeseries
108
+
109
+ - 'end': `origin` is the last value of the timeseries
110
+ - 'end_day': `origin` is the ceiling midnight of the last day
111
+
112
+ .. versionadded:: 1.3.0
113
+
114
+ offset : Timedelta or str, default is None
115
+ An offset timedelta added to the origin.
116
+
117
+ dropna : bool, default True
118
+ If True, and if group keys contain NA values, NA values together with
119
+ row/column will be dropped. If False, NA values will also be treated as
120
+ the key in groups.
121
+
122
+ Returns
123
+ -------
124
+ Grouper or pandas.api.typing.TimeGrouper
125
+ A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper
126
+ is returned.
127
+
128
+ Examples
129
+ --------
130
+ ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``
131
+
132
+ >>> df = pd.DataFrame(
133
+ ... {
134
+ ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
135
+ ... "Speed": [100, 5, 200, 300, 15],
136
+ ... }
137
+ ... )
138
+ >>> df
139
+ Animal Speed
140
+ 0 Falcon 100
141
+ 1 Parrot 5
142
+ 2 Falcon 200
143
+ 3 Falcon 300
144
+ 4 Parrot 15
145
+ >>> df.groupby(pd.Grouper(key="Animal")).mean()
146
+ Speed
147
+ Animal
148
+ Falcon 200.0
149
+ Parrot 10.0
150
+
151
+ Specify a resample operation on the column 'Publish date'
152
+
153
+ >>> df = pd.DataFrame(
154
+ ... {
155
+ ... "Publish date": [
156
+ ... pd.Timestamp("2000-01-02"),
157
+ ... pd.Timestamp("2000-01-02"),
158
+ ... pd.Timestamp("2000-01-09"),
159
+ ... pd.Timestamp("2000-01-16")
160
+ ... ],
161
+ ... "ID": [0, 1, 2, 3],
162
+ ... "Price": [10, 20, 30, 40]
163
+ ... }
164
+ ... )
165
+ >>> df
166
+ Publish date ID Price
167
+ 0 2000-01-02 0 10
168
+ 1 2000-01-02 1 20
169
+ 2 2000-01-09 2 30
170
+ 3 2000-01-16 3 40
171
+ >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
172
+ ID Price
173
+ Publish date
174
+ 2000-01-02 0.5 15.0
175
+ 2000-01-09 2.0 30.0
176
+ 2000-01-16 3.0 40.0
177
+
178
+ If you want to adjust the start of the bins based on a fixed timestamp:
179
+
180
+ >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
181
+ >>> rng = pd.date_range(start, end, freq='7min')
182
+ >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
183
+ >>> ts
184
+ 2000-10-01 23:30:00 0
185
+ 2000-10-01 23:37:00 3
186
+ 2000-10-01 23:44:00 6
187
+ 2000-10-01 23:51:00 9
188
+ 2000-10-01 23:58:00 12
189
+ 2000-10-02 00:05:00 15
190
+ 2000-10-02 00:12:00 18
191
+ 2000-10-02 00:19:00 21
192
+ 2000-10-02 00:26:00 24
193
+ Freq: 7min, dtype: int64
194
+
195
+ >>> ts.groupby(pd.Grouper(freq='17min')).sum()
196
+ 2000-10-01 23:14:00 0
197
+ 2000-10-01 23:31:00 9
198
+ 2000-10-01 23:48:00 21
199
+ 2000-10-02 00:05:00 54
200
+ 2000-10-02 00:22:00 24
201
+ Freq: 17min, dtype: int64
202
+
203
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
204
+ 2000-10-01 23:18:00 0
205
+ 2000-10-01 23:35:00 18
206
+ 2000-10-01 23:52:00 27
207
+ 2000-10-02 00:09:00 39
208
+ 2000-10-02 00:26:00 24
209
+ Freq: 17min, dtype: int64
210
+
211
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
212
+ 2000-10-01 23:24:00 3
213
+ 2000-10-01 23:41:00 15
214
+ 2000-10-01 23:58:00 45
215
+ 2000-10-02 00:15:00 45
216
+ Freq: 17min, dtype: int64
217
+
218
+ If you want to adjust the start of the bins with an `offset` Timedelta, the two
219
+ following lines are equivalent:
220
+
221
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
222
+ 2000-10-01 23:30:00 9
223
+ 2000-10-01 23:47:00 21
224
+ 2000-10-02 00:04:00 54
225
+ 2000-10-02 00:21:00 24
226
+ Freq: 17min, dtype: int64
227
+
228
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
229
+ 2000-10-01 23:30:00 9
230
+ 2000-10-01 23:47:00 21
231
+ 2000-10-02 00:04:00 54
232
+ 2000-10-02 00:21:00 24
233
+ Freq: 17min, dtype: int64
234
+
235
+ To replace the use of the deprecated `base` argument, you can now use `offset`,
236
+ in this example it is equivalent to have `base=2`:
237
+
238
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
239
+ 2000-10-01 23:16:00 0
240
+ 2000-10-01 23:33:00 9
241
+ 2000-10-01 23:50:00 36
242
+ 2000-10-02 00:07:00 39
243
+ 2000-10-02 00:24:00 24
244
+ Freq: 17min, dtype: int64
245
+ """
246
+
247
+ sort: bool
248
+ dropna: bool
249
+ _gpr_index: Index | None
250
+ _grouper: Index | None
251
+
252
+ _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna")
253
+
254
+ def __new__(cls, *args, **kwargs):
255
+ if kwargs.get("freq") is not None:
256
+ from pandas.core.resample import TimeGrouper
257
+
258
+ cls = TimeGrouper
259
+ return super().__new__(cls)
260
+
261
+ def __init__(
262
+ self,
263
+ key=None,
264
+ level=None,
265
+ freq=None,
266
+ axis: Axis | lib.NoDefault = lib.no_default,
267
+ sort: bool = False,
268
+ dropna: bool = True,
269
+ ) -> None:
270
+ if type(self) is Grouper:
271
+ # i.e. not TimeGrouper
272
+ if axis is not lib.no_default:
273
+ warnings.warn(
274
+ "Grouper axis keyword is deprecated and will be removed in a "
275
+ "future version. To group on axis=1, use obj.T.groupby(...) "
276
+ "instead",
277
+ FutureWarning,
278
+ stacklevel=find_stack_level(),
279
+ )
280
+ else:
281
+ axis = 0
282
+ if axis is lib.no_default:
283
+ axis = 0
284
+
285
+ self.key = key
286
+ self.level = level
287
+ self.freq = freq
288
+ self.axis = axis
289
+ self.sort = sort
290
+ self.dropna = dropna
291
+
292
+ self._grouper_deprecated = None
293
+ self._indexer_deprecated: npt.NDArray[np.intp] | None = None
294
+ self._obj_deprecated = None
295
+ self._gpr_index = None
296
+ self.binner = None
297
+ self._grouper = None
298
+ self._indexer: npt.NDArray[np.intp] | None = None
299
+
300
+ def _get_grouper(
301
+ self, obj: NDFrameT, validate: bool = True
302
+ ) -> tuple[ops.BaseGrouper, NDFrameT]:
303
+ """
304
+ Parameters
305
+ ----------
306
+ obj : Series or DataFrame
307
+ validate : bool, default True
308
+ if True, validate the grouper
309
+
310
+ Returns
311
+ -------
312
+ a tuple of grouper, obj (possibly sorted)
313
+ """
314
+ obj, _, _ = self._set_grouper(obj)
315
+ grouper, _, obj = get_grouper(
316
+ obj,
317
+ [self.key],
318
+ axis=self.axis,
319
+ level=self.level,
320
+ sort=self.sort,
321
+ validate=validate,
322
+ dropna=self.dropna,
323
+ )
324
+ # Without setting this, subsequent lookups to .groups raise
325
+ # error: Incompatible types in assignment (expression has type "BaseGrouper",
326
+ # variable has type "None")
327
+ self._grouper_deprecated = grouper # type: ignore[assignment]
328
+
329
+ return grouper, obj
330
+
331
    def _set_grouper(
        self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
    ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
        """
        Given an object and the specifications, set up the internal grouper
        for this particular specification.

        Parameters
        ----------
        obj : Series or DataFrame
        sort : bool, default False
            Whether the resulting grouper should be sorted.
        gpr_index : Index or None, default None
            Previously computed grouping axis to reuse when ``self.key``
            matches its name (presumably from an earlier call on the same
            object — TODO confirm with callers in resample).

        Returns
        -------
        NDFrame
            *obj*, reordered by the sort indexer when sorting occurred.
        Index
            The axis (or key-derived Index) to group on.
        np.ndarray[np.intp] | None
            The stable-sort indexer, or None when no sorting was needed.
        """
        assert obj is not None

        # key and level are mutually exclusive specifications
        if self.key is not None and self.level is not None:
            raise ValueError("The Grouper cannot specify both a key and a level!")

        # Keep self._grouper value before overriding
        if self._grouper is None:
            # TODO: What are we assuming about subsequent calls?
            self._grouper = gpr_index
            self._indexer = self._indexer_deprecated

        # the key must be a valid info item
        if self.key is not None:
            key = self.key
            # The 'on' is already defined
            if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
                # Sometimes self._grouper will have been resorted while
                # obj has not. In this case there is a mismatch when we
                # call self._grouper.take(obj.index) so we need to undo the sorting
                # before we call _grouper.take.
                assert self._grouper is not None
                if self._indexer is not None:
                    reverse_indexer = self._indexer.argsort()
                    unsorted_ax = self._grouper.take(reverse_indexer)
                    ax = unsorted_ax.take(obj.index)
                else:
                    ax = self._grouper.take(obj.index)
            else:
                # key must refer to a column (DataFrame) / the index (Series)
                if key not in obj._info_axis:
                    raise KeyError(f"The grouper name {key} is not found")
                ax = Index(obj[key], name=key)

        else:
            ax = obj._get_axis(self.axis)
            if self.level is not None:
                level = self.level

                # if a level is given it must be a mi level or
                # equivalent to the axis name
                if isinstance(ax, MultiIndex):
                    level = ax._get_level_number(level)
                    ax = Index(ax._get_level_values(level), name=ax.names[level])

                else:
                    # flat index: only level 0 / the axis' own name are valid
                    if level not in (0, ax.name):
                        raise ValueError(f"The level {level} is not valid")

        # possibly sort
        indexer: npt.NDArray[np.intp] | None = None
        if (self.sort or sort) and not ax.is_monotonic_increasing:
            # use stable sort to support first, last, nth
            # TODO: why does putting na_position="first" fix datetimelike cases?
            indexer = self._indexer_deprecated = ax.array.argsort(
                kind="mergesort", na_position="first"
            )
            ax = ax.take(indexer)
            obj = obj.take(indexer, axis=self.axis)

        # error: Incompatible types in assignment (expression has type
        # "NDFrameT", variable has type "None")
        self._obj_deprecated = obj  # type: ignore[assignment]
        self._gpr_index = ax
        return obj, ax, indexer
414
+
415
+ @final
416
+ @property
417
+ def ax(self) -> Index:
418
+ warnings.warn(
419
+ f"{type(self).__name__}.ax is deprecated and will be removed in a "
420
+ "future version. Use Resampler.ax instead",
421
+ FutureWarning,
422
+ stacklevel=find_stack_level(),
423
+ )
424
+ index = self._gpr_index
425
+ if index is None:
426
+ raise ValueError("_set_grouper must be called before ax is accessed")
427
+ return index
428
+
429
+ @final
430
+ @property
431
+ def indexer(self):
432
+ warnings.warn(
433
+ f"{type(self).__name__}.indexer is deprecated and will be removed "
434
+ "in a future version. Use Resampler.indexer instead.",
435
+ FutureWarning,
436
+ stacklevel=find_stack_level(),
437
+ )
438
+ return self._indexer_deprecated
439
+
440
+ @final
441
+ @property
442
+ def obj(self):
443
+ # TODO(3.0): enforcing these deprecations on Grouper should close
444
+ # GH#25564, GH#41930
445
+ warnings.warn(
446
+ f"{type(self).__name__}.obj is deprecated and will be removed "
447
+ "in a future version. Use GroupBy.indexer instead.",
448
+ FutureWarning,
449
+ stacklevel=find_stack_level(),
450
+ )
451
+ return self._obj_deprecated
452
+
453
+ @final
454
+ @property
455
+ def grouper(self):
456
+ warnings.warn(
457
+ f"{type(self).__name__}.grouper is deprecated and will be removed "
458
+ "in a future version. Use GroupBy.grouper instead.",
459
+ FutureWarning,
460
+ stacklevel=find_stack_level(),
461
+ )
462
+ return self._grouper_deprecated
463
+
464
+ @final
465
+ @property
466
+ def groups(self):
467
+ warnings.warn(
468
+ f"{type(self).__name__}.groups is deprecated and will be removed "
469
+ "in a future version. Use GroupBy.groups instead.",
470
+ FutureWarning,
471
+ stacklevel=find_stack_level(),
472
+ )
473
+ # error: "None" has no attribute "groups"
474
+ return self._grouper_deprecated.groups # type: ignore[attr-defined]
475
+
476
+ @final
477
+ def __repr__(self) -> str:
478
+ attrs_list = (
479
+ f"{attr_name}={repr(getattr(self, attr_name))}"
480
+ for attr_name in self._attributes
481
+ if getattr(self, attr_name) is not None
482
+ )
483
+ attrs = ", ".join(attrs_list)
484
+ cls_name = type(self).__name__
485
+ return f"{cls_name}({attrs})"
486
+
487
+
488
@final
class Grouping:
    """
    Holds the grouping information for a single key

    Parameters
    ----------
    index : Index
        The axis of the object being grouped.
    grouper :
        The raw, user-supplied grouping specification for this key
        (column values, mapping, Grouper, array-like, ...); may be None
        when grouping purely by ``level``.
    obj : DataFrame or Series
    name : Label
    level :
        Index level name or number, when grouping by an index level.
    observed : bool, default False
        If we are a Categorical, use the observed values
    in_axis : if the Grouping is a column in self.obj and hence among
        Groupby.exclusions list
    dropna : bool, default True
        Whether to drop NA groups.
    uniques : Array-like, optional
        When specified, will be used for unique values. Enables including empty groups
        in the result for a BinGrouper. Must not contain duplicates.

    Attributes
    ----------
    indices : dict
        Mapping of {group -> index_list}
    codes : ndarray
        Group codes
    group_index : Index or None
        unique groups
    groups : dict
        Mapping of {group -> label_list}
    """

    # lazily-populated integer codes cache
    _codes: npt.NDArray[np.signedinteger] | None = None
    # full categorical grouper (observed and unobserved), set only when the
    # user passed a Categorical; see recode_for_groupby
    _all_grouper: Categorical | None
    # original categories of a passed Categorical, before recoding
    _orig_cats: Index | None
    # the axis being grouped
    _index: Index

    def __init__(
        self,
        index: Index,
        grouper=None,
        obj: NDFrame | None = None,
        level=None,
        sort: bool = True,
        observed: bool = False,
        in_axis: bool = False,
        dropna: bool = True,
        uniques: ArrayLike | None = None,
    ) -> None:
        """
        Normalize the passed ``grouper`` into ``self.grouping_vector``:
        an Index/Series/ndarray/ExtensionArray aligned with ``index``, or a
        BinGrouper when a freq-based Grouper was passed.
        """
        self.level = level
        self._orig_grouper = grouper
        grouping_vector = _convert_grouper(index, grouper)
        self._all_grouper = None
        self._orig_cats = None
        self._index = index
        self._sort = sort
        self.obj = obj
        self._observed = observed
        self.in_axis = in_axis
        self._dropna = dropna
        self._uniques = uniques

        # we have a single grouper which may be a myriad of things,
        # some of which are dependent on the passing in level

        ilevel = self._ilevel
        if ilevel is not None:
            # In extant tests, the new self.grouping_vector matches
            # `index.get_level_values(ilevel)` whenever
            # mapper is None and isinstance(index, MultiIndex)
            if isinstance(index, MultiIndex):
                index_level = index.get_level_values(ilevel)
            else:
                index_level = index

            if grouping_vector is None:
                grouping_vector = index_level
            else:
                # grouping by a mapping applied to the level values
                mapper = grouping_vector
                grouping_vector = index_level.map(mapper)

        # a passed Grouper like, directly get the grouper in the same way
        # as single grouper groupby, use the group_info to get codes
        elif isinstance(grouping_vector, Grouper):
            # get the new grouper; we already have disambiguated
            # what key/level refer to exactly, don't need to
            # check again as we have by this point converted these
            # to an actual value (rather than a pd.Grouper)
            assert self.obj is not None  # for mypy
            newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False)
            self.obj = newobj

            if isinstance(newgrouper, ops.BinGrouper):
                # TODO: can we unwrap this and get a tighter typing
                # for self.grouping_vector?
                grouping_vector = newgrouper
            else:
                # ops.BaseGrouper
                # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1.
                # If that were to occur, would we be throwing out information?
                # error: Cannot determine type of "grouping_vector"  [has-type]
                ng = newgrouper.groupings[0].grouping_vector  # type: ignore[has-type]
                # use Index instead of ndarray so we can recover the name
                grouping_vector = Index(ng, name=newgrouper.result_index.name)

        elif not isinstance(
            grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
        ):
            # no level passed
            if getattr(grouping_vector, "ndim", 1) != 1:
                t = str(type(grouping_vector))
                raise ValueError(f"Grouper for '{t}' not 1-dimensional")

            # treat as a mapping/callable applied to the index
            grouping_vector = index.map(grouping_vector)

            if not (
                hasattr(grouping_vector, "__len__")
                and len(grouping_vector) == len(index)
            ):
                grper = pprint_thing(grouping_vector)
                errmsg = (
                    "Grouper result violates len(labels) == "
                    f"len(data)\nresult: {grper}"
                )
                raise AssertionError(errmsg)

        if isinstance(grouping_vector, np.ndarray):
            if grouping_vector.dtype.kind in "mM":
                # if we have a date/time-like grouper, make sure that we have
                # Timestamps like
                # TODO 2022-10-08 we only have one test that gets here and
                # values are already in nanoseconds in that case.
                grouping_vector = Series(grouping_vector).to_numpy()
        elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype):
            # a passed Categorical
            self._orig_cats = grouping_vector.categories
            grouping_vector, self._all_grouper = recode_for_groupby(
                grouping_vector, sort, observed
            )

        self.grouping_vector = grouping_vector

    def __repr__(self) -> str:
        return f"Grouping({self.name})"

    def __iter__(self) -> Iterator:
        # iterate over group labels
        return iter(self.indices)

    @cache_readonly
    def _passed_categorical(self) -> bool:
        # True when the user passed a Categorical-dtyped grouper
        dtype = getattr(self.grouping_vector, "dtype", None)
        return isinstance(dtype, CategoricalDtype)

    @cache_readonly
    def name(self) -> Hashable:
        """The label for this grouping, derived from level/grouper, or None."""
        ilevel = self._ilevel
        if ilevel is not None:
            return self._index.names[ilevel]

        if isinstance(self._orig_grouper, (Index, Series)):
            return self._orig_grouper.name

        elif isinstance(self.grouping_vector, ops.BaseGrouper):
            return self.grouping_vector.result_index.name

        elif isinstance(self.grouping_vector, Index):
            return self.grouping_vector.name

        # otherwise we have ndarray or ExtensionArray -> no name
        return None

    @cache_readonly
    def _ilevel(self) -> int | None:
        """
        If necessary, converted index level name to index level position.
        """
        level = self.level
        if level is None:
            return None
        if not isinstance(level, int):
            index = self._index
            if level not in index.names:
                raise AssertionError(f"Level {level} not in index")
            return index.names.index(level)
        return level

    @property
    def ngroups(self) -> int:
        # number of distinct groups (including unobserved categories, if kept)
        return len(self._group_index)

    @cache_readonly
    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
        """Mapping of {group label -> positional indices in the axis}."""
        # we have a list of groupers
        if isinstance(self.grouping_vector, ops.BaseGrouper):
            return self.grouping_vector.indices

        values = Categorical(self.grouping_vector)
        return values._reverse_indexer()

    @property
    def codes(self) -> npt.NDArray[np.signedinteger]:
        # integer code per row; -1 convention handled in _codes_and_uniques
        return self._codes_and_uniques[0]

    @cache_readonly
    def _group_arraylike(self) -> ArrayLike:
        """
        Analogous to result_index, but holding an ArrayLike to ensure
        we can retain ExtensionDtypes.
        """
        if self._all_grouper is not None:
            # retain dtype for categories, including unobserved ones
            return self._result_index._values

        elif self._passed_categorical:
            return self._group_index._values

        return self._codes_and_uniques[1]

    @property
    def group_arraylike(self) -> ArrayLike:
        """
        Analogous to result_index, but holding an ArrayLike to ensure
        we can retain ExtensionDtypes.

        .. deprecated:: use ``_group_arraylike`` internally.
        """
        warnings.warn(
            "group_arraylike is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._group_arraylike

    @cache_readonly
    def _result_index(self) -> Index:
        # result_index retains dtype for categories, including unobserved ones,
        # which group_index does not
        if self._all_grouper is not None:
            group_idx = self._group_index
            assert isinstance(group_idx, CategoricalIndex)
            cats = self._orig_cats
            # set_categories is dynamically added
            return group_idx.set_categories(cats)  # type: ignore[attr-defined]
        return self._group_index

    @property
    def result_index(self) -> Index:
        # deprecated public alias for _result_index
        warnings.warn(
            "result_index is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._result_index

    @cache_readonly
    def _group_index(self) -> Index:
        """Index of unique groups, with the NA group positioned if kept."""
        codes, uniques = self._codes_and_uniques
        if not self._dropna and self._passed_categorical:
            assert isinstance(uniques, Categorical)
            if self._sort and (codes == len(uniques)).any():
                # Add NA value on the end when sorting
                uniques = Categorical.from_codes(
                    np.append(uniques.codes, [-1]), uniques.categories, validate=False
                )
            elif len(codes) > 0:
                # Need to determine proper placement of NA value when not sorting
                cat = self.grouping_vector
                na_idx = (cat.codes < 0).argmax()
                if cat.codes[na_idx] < 0:
                    # count number of unique codes that comes before the nan value
                    na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx])
                    new_codes = np.insert(uniques.codes, na_unique_idx, -1)
                    uniques = Categorical.from_codes(
                        new_codes, uniques.categories, validate=False
                    )
        return Index._with_infer(uniques, name=self.name)

    @property
    def group_index(self) -> Index:
        # deprecated public alias for _group_index
        warnings.warn(
            "group_index is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._group_index

    @cache_readonly
    def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
        """
        Factorize the grouping vector into (codes, uniques).

        Codes use -1 for NA when ``dropna=True``; with ``dropna=False`` the NA
        group is assigned its own non-negative code, placed per sort order.
        """
        uniques: ArrayLike
        if self._passed_categorical:
            # we make a CategoricalIndex out of the cat grouper
            # preserving the categories / ordered attributes;
            # doesn't (yet - GH#46909) handle dropna=False
            cat = self.grouping_vector
            categories = cat.categories

            if self._observed:
                ucodes = algorithms.unique1d(cat.codes)
                ucodes = ucodes[ucodes != -1]
                if self._sort:
                    ucodes = np.sort(ucodes)
            else:
                ucodes = np.arange(len(categories))

            uniques = Categorical.from_codes(
                codes=ucodes, categories=categories, ordered=cat.ordered, validate=False
            )

            codes = cat.codes
            if not self._dropna:
                na_mask = codes < 0
                if np.any(na_mask):
                    if self._sort:
                        # Replace NA codes with `largest code + 1`
                        na_code = len(categories)
                        codes = np.where(na_mask, na_code, codes)
                    else:
                        # Insert NA code into the codes based on first appearance
                        # A negative code must exist, no need to check codes[na_idx] < 0
                        na_idx = na_mask.argmax()
                        # count number of unique codes that comes before the nan value
                        na_code = algorithms.nunique_ints(codes[:na_idx])
                        codes = np.where(codes >= na_code, codes + 1, codes)
                        codes = np.where(na_mask, na_code, codes)

            if not self._observed:
                uniques = uniques.reorder_categories(self._orig_cats)

            return codes, uniques

        elif isinstance(self.grouping_vector, ops.BaseGrouper):
            # we have a list of groupers
            codes = self.grouping_vector.codes_info
            uniques = self.grouping_vector.result_index._values
        elif self._uniques is not None:
            # GH#50486 Code grouping_vector using _uniques; allows
            # including uniques that are not present in grouping_vector.
            cat = Categorical(self.grouping_vector, categories=self._uniques)
            codes = cat.codes
            uniques = self._uniques
        else:
            # GH35667, replace dropna=False with use_na_sentinel=False
            # error: Incompatible types in assignment (expression has type "Union[
            # ndarray[Any, Any], Index]", variable has type "Categorical")
            codes, uniques = algorithms.factorize(  # type: ignore[assignment]
                self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna
            )
        return codes, uniques

    @cache_readonly
    def groups(self) -> dict[Hashable, np.ndarray]:
        """Mapping of {group label -> subset of the axis labels}."""
        cats = Categorical.from_codes(self.codes, self._group_index, validate=False)
        return self._index.groupby(cats)
844
+
845
+
846
def get_grouper(
    obj: NDFrameT,
    key=None,
    axis: Axis = 0,
    level=None,
    sort: bool = True,
    observed: bool = False,
    validate: bool = True,
    dropna: bool = True,
) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]:
    """
    Create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers

    Groupers are ultimately index mappings. They can originate as:
    index mappings, keys to columns, functions, or Groupers

    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.

    This routine tries to figure out what the passing in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.

    If observed & we have a categorical grouper, only show the observed
    values.

    If validate, then check for key/level overlaps.

    Returns
    -------
    tuple
        (BaseGrouper, frozenset of excluded column labels, obj) — *obj* may
        have been replaced when a Grouper re-sorted it.
    """
    group_axis = obj._get_axis(axis)

    # validate that the passed single level is compatible with the passed
    # axis of the object
    if level is not None:
        # TODO: These if-block and else-block are almost same.
        # MultiIndex instance check is removable, but it seems that there are
        # some processes only for non-MultiIndex in else-block,
        # eg. `obj.index.name != level`. We have to consider carefully whether
        # these are applicable for MultiIndex. Even if these are applicable,
        # we need to check if it makes no side effect to subsequent processes
        # on the outside of this condition.
        # (GH 17621)
        if isinstance(group_axis, MultiIndex):
            if is_list_like(level) and len(level) == 1:
                level = level[0]

            if key is None and is_scalar(level):
                # Get the level values from group_axis
                key = group_axis.get_level_values(level)
                level = None

        else:
            # allow level to be a length-one list-like object
            # (e.g., level=[0])
            # GH 13901
            if is_list_like(level):
                nlevels = len(level)
                if nlevels == 1:
                    level = level[0]
                elif nlevels == 0:
                    raise ValueError("No group keys passed!")
                else:
                    raise ValueError("multiple levels only valid with MultiIndex")

            if isinstance(level, str):
                if obj._get_axis(axis).name != level:
                    raise ValueError(
                        f"level name {level} is not the name "
                        f"of the {obj._get_axis_name(axis)}"
                    )
            elif level > 0 or level < -1:
                raise ValueError("level > 0 or level < -1 only valid with MultiIndex")

            # NOTE: `group_axis` and `group_axis.get_level_values(level)`
            # are same in this section.
            level = None
            key = group_axis

    # a passed-in Grouper, directly convert
    if isinstance(key, Grouper):
        grouper, obj = key._get_grouper(obj, validate=False)
        if key.key is None:
            return grouper, frozenset(), obj
        else:
            return grouper, frozenset({key.key}), obj

    # already have a BaseGrouper, just return it
    elif isinstance(key, ops.BaseGrouper):
        return key, frozenset(), obj

    if not isinstance(key, list):
        keys = [key]
        match_axis_length = False
    else:
        keys = key
        match_axis_length = len(keys) == len(group_axis)

    # what are we after, exactly?
    any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
    any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys)
    any_arraylike = any(
        isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
    )

    # is this an index replacement?
    if (
        not any_callable
        and not any_arraylike
        and not any_groupers
        and match_axis_length
        and level is None
    ):
        if isinstance(obj, DataFrame):
            all_in_columns_index = all(
                g in obj.columns or g in obj.index.names for g in keys
            )
        else:
            assert isinstance(obj, Series)
            all_in_columns_index = all(g in obj.index.names for g in keys)

        if not all_in_columns_index:
            # treat the whole list as a single array-like grouper
            keys = [com.asarray_tuplesafe(keys)]

    if isinstance(level, (tuple, list)):
        if key is None:
            keys = [None] * len(level)
        levels = level
    else:
        levels = [level] * len(keys)

    groupings: list[Grouping] = []
    exclusions: set[Hashable] = set()

    # if the actual grouper should be obj[key]
    def is_in_axis(key) -> bool:
        if not _is_label_like(key):
            if obj.ndim == 1:
                return False

            # items -> .columns for DataFrame, .index for Series
            items = obj.axes[-1]
            try:
                items.get_loc(key)
            except (KeyError, TypeError, InvalidIndexError):
                # TypeError shows up here if we pass e.g. an Index
                return False

        return True

    # if the grouper is obj[name]
    def is_in_obj(gpr) -> bool:
        if not hasattr(gpr, "name"):
            return False
        if using_copy_on_write() or warn_copy_on_write():
            # For the CoW case, we check the references to determine if the
            # series is part of the object
            try:
                obj_gpr_column = obj[gpr.name]
            except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
                return False
            if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series):
                return gpr._mgr.references_same_values(  # type: ignore[union-attr]
                    obj_gpr_column._mgr, 0  # type: ignore[arg-type]
                )
            return False
        try:
            return gpr is obj[gpr.name]
        except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
            # IndexError reached in e.g. test_skip_group_keys when we pass
            # lambda here
            # InvalidIndexError raised on key-types inappropriate for index,
            # e.g. DatetimeIndex.get_loc(tuple())
            # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex
            # and gpr.name is month str
            return False

    for gpr, level in zip(keys, levels):
        if is_in_obj(gpr):  # df.groupby(df['name'])
            in_axis = True
            exclusions.add(gpr.name)

        elif is_in_axis(gpr):  # df.groupby('name')
            if obj.ndim != 1 and gpr in obj:
                if validate:
                    obj._check_label_or_level_ambiguity(gpr, axis=axis)
                in_axis, name, gpr = True, gpr, obj[gpr]
                if gpr.ndim != 1:
                    # non-unique columns; raise here to get the name in the
                    # exception message
                    raise ValueError(f"Grouper for '{name}' not 1-dimensional")
                exclusions.add(name)
            elif obj._is_level_reference(gpr, axis=axis):
                in_axis, level, gpr = False, gpr, None
            else:
                raise KeyError(gpr)
        elif isinstance(gpr, Grouper) and gpr.key is not None:
            # Add key to exclusions
            exclusions.add(gpr.key)
            in_axis = True
        else:
            in_axis = False

        # create the Grouping
        # allow us to passing the actual Grouping as the gpr
        ping = (
            Grouping(
                group_axis,
                gpr,
                obj=obj,
                level=level,
                sort=sort,
                observed=observed,
                in_axis=in_axis,
                dropna=dropna,
            )
            if not isinstance(gpr, Grouping)
            else gpr
        )

        groupings.append(ping)

    if len(groupings) == 0 and len(obj):
        raise ValueError("No group keys passed!")
    if len(groupings) == 0:
        # empty object: group on an empty dummy grouping
        groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))

    # create the internals grouper
    grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna)
    return grouper, frozenset(exclusions), obj
1078
+
1079
+
1080
def _is_label_like(val) -> bool:
    """Return True if *val* could be an axis label: a str, a tuple, or any
    non-None scalar."""
    if isinstance(val, (str, tuple)):
        return True
    return val is not None and is_scalar(val)
1082
+
1083
+
1084
def _convert_grouper(axis: Index, grouper):
    """
    Normalize a user-supplied grouper into something ``Grouping`` can consume.

    dicts become their ``.get`` method, Series are aligned to *axis* and
    reduced to their values, array-likes are length-checked (with list/tuple
    converted to ndarray), and anything else (e.g. a callable) is returned
    unchanged.
    """
    if isinstance(grouper, dict):
        # missing labels map to None via dict.get
        return grouper.get
    if isinstance(grouper, Series):
        if grouper.index.equals(axis):
            return grouper._values
        # align to the axis before extracting values
        return grouper.reindex(axis)._values
    if isinstance(grouper, MultiIndex):
        return grouper._values
    if isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):
        if len(grouper) != len(axis):
            raise ValueError("Grouper and axis must be same length")
        if isinstance(grouper, (list, tuple)):
            return com.asarray_tuplesafe(grouper)
        return grouper
    # e.g. a callable: applied to the index downstream
    return grouper
videollama2/lib/python3.10/site-packages/pandas/core/groupby/indexing.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Iterable
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Literal,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas.util._decorators import (
13
+ cache_readonly,
14
+ doc,
15
+ )
16
+
17
+ from pandas.core.dtypes.common import (
18
+ is_integer,
19
+ is_list_like,
20
+ )
21
+
22
+ if TYPE_CHECKING:
23
+ from pandas._typing import PositionalIndexer
24
+
25
+ from pandas import (
26
+ DataFrame,
27
+ Series,
28
+ )
29
+ from pandas.core.groupby import groupby
30
+
31
+
32
+ class GroupByIndexingMixin:
33
+ """
34
+ Mixin for adding ._positional_selector to GroupBy.
35
+ """
36
+
37
    @cache_readonly
    def _positional_selector(self) -> GroupByPositionalSelector:
        """
        Return positional selection for each group.

        ``groupby._positional_selector[i:j]`` is similar to
        ``groupby.apply(lambda x: x.iloc[i:j])``
        but much faster and preserves the original index and order.

        ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head`
        and :meth:`~GroupBy.tail`. For example:

        - ``head(5)``
        - ``_positional_selector[5:-5]``
        - ``tail(5)``

        together return all the rows.

        Allowed inputs for the index are:

        - An integer valued iterable, e.g. ``range(2, 4)``.
        - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``.

        The output format is the same as :meth:`~GroupBy.head` and
        :meth:`~GroupBy.tail`, namely
        a subset of the ``DataFrame`` or ``Series`` with the index and order preserved.

        Returns
        -------
        Series
            The filtered subset of the original Series.
        DataFrame
            The filtered subset of the original DataFrame.

        See Also
        --------
        DataFrame.iloc : Purely integer-location based indexing for selection by
            position.
        GroupBy.head : Return first n rows of each group.
        GroupBy.tail : Return last n rows of each group.
        GroupBy.nth : Take the nth row from each group if n is an int, or a
            subset of rows, if n is a list of ints.

        Notes
        -----
        - The slice step cannot be negative.
        - If the index specification results in overlaps, the item is not duplicated.
        - If the index specification changes the order of items, then
          they are returned in their original order.
          By contrast, ``DataFrame.iloc`` can change the row order.
        - ``groupby()`` parameters such as as_index and dropna are ignored.

        The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth`
        with ``as_index=False`` are:

        - Input to ``_positional_selector`` can include
          one or more slices whereas ``nth``
          just handles an integer or a list of integers.
        - ``_positional_selector`` can  accept a slice relative to the
          last row of each group.
        - ``_positional_selector`` does not have an equivalent to the
          ``nth()`` ``dropna`` parameter.

        Examples
        --------
        >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],
        ...                   columns=["A", "B"])
        >>> df.groupby("A")._positional_selector[1:2]
           A  B
        1  a  2
        4  b  5

        >>> df.groupby("A")._positional_selector[1, -1]
           A  B
        1  a  2
        2  a  3
        4  b  5
        """
        if TYPE_CHECKING:
            # at runtime TYPE_CHECKING is False, so self is passed unchanged;
            # the cast exists only for static typing
            # pylint: disable-next=used-before-assignment
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return GroupByPositionalSelector(groupby_self)
+ return GroupByPositionalSelector(groupby_self)
122
+
123
+ def _make_mask_from_positional_indexer(
124
+ self,
125
+ arg: PositionalIndexer | tuple,
126
+ ) -> np.ndarray:
127
+ if is_list_like(arg):
128
+ if all(is_integer(i) for i in cast(Iterable, arg)):
129
+ mask = self._make_mask_from_list(cast(Iterable[int], arg))
130
+ else:
131
+ mask = self._make_mask_from_tuple(cast(tuple, arg))
132
+
133
+ elif isinstance(arg, slice):
134
+ mask = self._make_mask_from_slice(arg)
135
+ elif is_integer(arg):
136
+ mask = self._make_mask_from_int(cast(int, arg))
137
+ else:
138
+ raise TypeError(
139
+ f"Invalid index {type(arg)}. "
140
+ "Must be integer, list-like, slice or a tuple of "
141
+ "integers and slices"
142
+ )
143
+
144
+ if isinstance(mask, bool):
145
+ if mask:
146
+ mask = self._ascending_count >= 0
147
+ else:
148
+ mask = self._ascending_count < 0
149
+
150
+ return cast(np.ndarray, mask)
151
+
152
+ def _make_mask_from_int(self, arg: int) -> np.ndarray:
153
+ if arg >= 0:
154
+ return self._ascending_count == arg
155
+ else:
156
+ return self._descending_count == (-arg - 1)
157
+
158
+ def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray:
159
+ positive = [arg for arg in args if arg >= 0]
160
+ negative = [-arg - 1 for arg in args if arg < 0]
161
+
162
+ mask: bool | np.ndarray = False
163
+
164
+ if positive:
165
+ mask |= np.isin(self._ascending_count, positive)
166
+
167
+ if negative:
168
+ mask |= np.isin(self._descending_count, negative)
169
+
170
+ return mask
171
+
172
+ def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray:
173
+ mask: bool | np.ndarray = False
174
+
175
+ for arg in args:
176
+ if is_integer(arg):
177
+ mask |= self._make_mask_from_int(cast(int, arg))
178
+ elif isinstance(arg, slice):
179
+ mask |= self._make_mask_from_slice(arg)
180
+ else:
181
+ raise ValueError(
182
+ f"Invalid argument {type(arg)}. Should be int or slice."
183
+ )
184
+
185
+ return mask
186
+
187
    def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray:
        """
        Boolean mask applying slice ``arg`` within each group.

        Returns the scalar ``True`` for the trivial all-rows slice; the
        caller expands scalar bools into full-length arrays.

        Raises
        ------
        ValueError
            If the slice has a negative step.
        """
        start = arg.start
        stop = arg.stop
        step = arg.step

        # Negative steps are explicitly unsupported by the positional selector.
        if step is not None and step < 0:
            raise ValueError(f"Invalid step {step}. Must be non-negative")

        mask: bool | np.ndarray = True

        if step is None:
            step = 1

        if start is None:
            if step > 1:
                # Keep every ``step``-th row from the front of each group.
                mask &= self._ascending_count % step == 0

        elif start >= 0:
            mask &= self._ascending_count >= start

            if step > 1:
                mask &= (self._ascending_count - start) % step == 0

        else:
            # Negative start: positions are counted from the end of the group.
            mask &= self._descending_count < -start

            offset_array = self._descending_count + start + 1
            # For groups shorter than ``-start`` the slice effectively starts
            # at position 0; fall back to the ascending count in that case.
            limit_array = (
                self._ascending_count + self._descending_count + (start + 1)
            ) < 0
            offset_array = np.where(limit_array, self._ascending_count, offset_array)

            mask &= offset_array % step == 0

        if stop is not None:
            if stop >= 0:
                mask &= self._ascending_count < stop
            else:
                # Negative stop: exclude the last ``-stop`` rows of each group.
                mask &= self._descending_count >= -stop

        return mask
228
+
229
    @cache_readonly
    def _ascending_count(self) -> np.ndarray:
        # Position of each row within its group, counted from the front
        # (0-based), cached since it is reused by every mask helper.
        if TYPE_CHECKING:
            # pylint: disable-next=used-before-assignment
            # Narrow for the type checker: at runtime this mixin is always
            # part of a GroupBy subclass.
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return groupby_self._cumcount_array()
237
+
238
    @cache_readonly
    def _descending_count(self) -> np.ndarray:
        # Position of each row within its group, counted from the back
        # (0-based from the end), cached for reuse by the mask helpers.
        if TYPE_CHECKING:
            # Narrow for the type checker: at runtime this mixin is always
            # part of a GroupBy subclass.
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return groupby_self._cumcount_array(ascending=False)
246
+
247
+
248
@doc(GroupByIndexingMixin._positional_selector)
class GroupByPositionalSelector:
    # NOTE: the class docstring is substituted by the @doc decorator from
    # GroupByIndexingMixin._positional_selector; do not add one here.

    def __init__(self, groupby_object: groupby.GroupBy) -> None:
        # The GroupBy whose rows will be selected by per-group position.
        self.groupby_object = groupby_object

    def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:
        """
        Select by positional index per group.

        Implements GroupBy._positional_selector

        Parameters
        ----------
        arg : PositionalIndexer | tuple
            Allowed values are:
            - int
            - int valued iterable such as list or range
            - slice with step either None or positive
            - tuple of integers and slices

        Returns
        -------
        Series
            The filtered subset of the original groupby Series.
        DataFrame
            The filtered subset of the original groupby DataFrame.

        See Also
        --------
        DataFrame.iloc : Integer-location based indexing for selection by position.
        GroupBy.head : Return first n rows of each group.
        GroupBy.tail : Return last n rows of each group.
        GroupBy._positional_selector : Return positional selection for each group.
        GroupBy.nth : Take the nth row from each group if n is an int, or a
            subset of rows, if n is a list of ints.
        """
        # Translate the indexer into a boolean row mask, then filter the
        # underlying selected object with it.
        mask = self.groupby_object._make_mask_from_positional_indexer(arg)
        return self.groupby_object._mask_selected_obj(mask)
286
+
287
+
288
class GroupByNthSelector:
    """
    Dynamically substituted for GroupBy.nth to enable both call and index
    """

    def __init__(self, groupby_object: groupby.GroupBy) -> None:
        # The GroupBy instance whose ``_nth`` implementation we delegate to.
        self.groupby_object = groupby_object

    def __call__(
        self,
        n: PositionalIndexer | tuple,
        dropna: Literal["any", "all", None] = None,
    ) -> DataFrame | Series:
        # Call form: ``df.groupby(...).nth(n, dropna=...)``.
        return self.groupby_object._nth(n, dropna)

    def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series:
        # Index form: ``df.groupby(...).nth[n]`` (no dropna support here).
        return self.groupby_object._nth(n)
videollama2/lib/python3.10/site-packages/pandas/core/groupby/numba_.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Common utilities for Numba operations with groupby ops"""
2
+ from __future__ import annotations
3
+
4
+ import functools
5
+ import inspect
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ Callable,
10
+ )
11
+
12
+ import numpy as np
13
+
14
+ from pandas.compat._optional import import_optional_dependency
15
+
16
+ from pandas.core.util.numba_ import (
17
+ NumbaUtilError,
18
+ jit_user_function,
19
+ )
20
+
21
+ if TYPE_CHECKING:
22
+ from pandas._typing import Scalar
23
+
24
+
25
def validate_udf(func: Callable) -> None:
    """
    Validate user defined function for ops when using Numba with groupby ops.

    The first signature arguments should include:

    def f(values, index, ...):
        ...

    Parameters
    ----------
    func : function, default False
        user defined function

    Returns
    -------
    None

    Raises
    ------
    NumbaUtilError
    """
    if not callable(func):
        raise NotImplementedError(
            "Numba engine can only be used with a single function."
        )

    parameter_names = list(inspect.signature(func).parameters.keys())
    required = ["values", "index"]
    n_required = len(required)
    # A too-short signature also fails the prefix comparison, so one check
    # covers both "missing arguments" and "wrong leading argument names".
    if parameter_names[:n_required] != required:
        raise NumbaUtilError(
            f"The first {n_required} arguments to {func.__name__} must be "
            f"{required}"
        )
62
+
63
+
64
@functools.cache
def generate_numba_agg_func(
    func: Callable[..., Scalar],
    nopython: bool,
    nogil: bool,
    parallel: bool,
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
    """
    Generate a numba jitted agg function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby agg function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    func : function
        function to be applied to each group and will be JITed
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    numba_func = jit_user_function(func)
    if TYPE_CHECKING:
        import numba
    else:
        # numba is an optional dependency; resolve it lazily here so that
        # pandas imports cleanly without it.
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_agg(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_columns: int,
        *args: Any,
    ) -> np.ndarray:
        # begin[i]:end[i] delimit the rows belonging to group i.
        assert len(begin) == len(end)
        num_groups = len(begin)

        # One aggregated scalar per (group, column) pair.
        result = np.empty((num_groups, num_columns))
        for i in numba.prange(num_groups):
            group_index = index[begin[i] : end[i]]
            for j in numba.prange(num_columns):
                group = values[begin[i] : end[i], j]
                result[i, j] = numba_func(group, group_index, *args)
        return result

    return group_agg
122
+
123
+
124
@functools.cache
def generate_numba_transform_func(
    func: Callable[..., np.ndarray],
    nopython: bool,
    nogil: bool,
    parallel: bool,
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
    """
    Generate a numba jitted transform function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby transform function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    func : function
        function to be applied to each window and will be JITed
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    numba_func = jit_user_function(func)
    if TYPE_CHECKING:
        import numba
    else:
        # numba is an optional dependency; resolve it lazily here so that
        # pandas imports cleanly without it.
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_transform(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_columns: int,
        *args: Any,
    ) -> np.ndarray:
        # begin[i]:end[i] delimit the rows belonging to group i.
        assert len(begin) == len(end)
        num_groups = len(begin)

        # Transform keeps the input length: one output row per input row.
        result = np.empty((len(values), num_columns))
        for i in numba.prange(num_groups):
            group_index = index[begin[i] : end[i]]
            for j in numba.prange(num_columns):
                group = values[begin[i] : end[i], j]
                result[begin[i] : end[i], j] = numba_func(group, group_index, *args)
        return result

    return group_transform
videollama2/lib/python3.10/site-packages/pandas/core/groupby/ops.py ADDED
@@ -0,0 +1,1208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provide classes to perform the groupby aggregate operations.
3
+
4
+ These are not exposed to the user and provide implementations of the grouping
5
+ operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
6
+ are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import collections
11
+ import functools
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Callable,
15
+ Generic,
16
+ final,
17
+ )
18
+
19
+ import numpy as np
20
+
21
+ from pandas._libs import (
22
+ NaT,
23
+ lib,
24
+ )
25
+ import pandas._libs.groupby as libgroupby
26
+ from pandas._typing import (
27
+ ArrayLike,
28
+ AxisInt,
29
+ NDFrameT,
30
+ Shape,
31
+ npt,
32
+ )
33
+ from pandas.errors import AbstractMethodError
34
+ from pandas.util._decorators import cache_readonly
35
+
36
+ from pandas.core.dtypes.cast import (
37
+ maybe_cast_pointwise_result,
38
+ maybe_downcast_to_dtype,
39
+ )
40
+ from pandas.core.dtypes.common import (
41
+ ensure_float64,
42
+ ensure_int64,
43
+ ensure_platform_int,
44
+ ensure_uint64,
45
+ is_1d_only_ea_dtype,
46
+ )
47
+ from pandas.core.dtypes.missing import (
48
+ isna,
49
+ maybe_fill,
50
+ )
51
+
52
+ from pandas.core.frame import DataFrame
53
+ from pandas.core.groupby import grouper
54
+ from pandas.core.indexes.api import (
55
+ CategoricalIndex,
56
+ Index,
57
+ MultiIndex,
58
+ ensure_index,
59
+ )
60
+ from pandas.core.series import Series
61
+ from pandas.core.sorting import (
62
+ compress_group_index,
63
+ decons_obs_group_ids,
64
+ get_flattened_list,
65
+ get_group_index,
66
+ get_group_index_sorter,
67
+ get_indexer_dict,
68
+ )
69
+
70
+ if TYPE_CHECKING:
71
+ from collections.abc import (
72
+ Hashable,
73
+ Iterator,
74
+ Sequence,
75
+ )
76
+
77
+ from pandas.core.generic import NDFrame
78
+
79
+
80
def check_result_array(obj, dtype) -> None:
    """
    Raise if a supposed aggregation returned an ndarray.

    Our operation is supposed to be an aggregation/reduction; an ndarray
    result usually means an invalid (non-reducing) function was passed.
    See test_apply_without_aggregation, test_agg_must_agg.
    """
    if isinstance(obj, np.ndarray) and dtype != object:
        # Object dtype is exempt: such functions can be genuine reductions
        # and still return an ndarray, e.g. test_agg_over_numpy_arrays.
        raise ValueError("Must produce aggregated value")
89
+
90
+
91
def extract_result(res):
    """
    Extract the result object, it might be a 0-dim ndarray
    or a len-1 0-dim, or a scalar
    """
    if hasattr(res, "_values"):
        # Unwrap to the underlying array, keeping any ExtensionArray intact.
        unwrapped = res._values
        if unwrapped.ndim == 1 and len(unwrapped) == 1:
            # A length-1 wrapper reduces to its single element
            # (see test_agg_lambda_with_timezone,
            # test_resampler_grouper.py::test_apply).
            return unwrapped[0]
        return unwrapped
    return res
103
+
104
+
105
class WrappedCythonOp:
    """
    Dispatch logic for functions defined in _libs.groupby

    Parameters
    ----------
    kind: str
        Whether the operation is an aggregate or transform.
    how: str
        Operation name, e.g. "mean".
    has_dropped_na: bool
        True precisely when dropna=True and the grouper contains a null value.
    """

    # Functions for which we do _not_ attempt to cast the cython result
    # back to the original dtype.
    cast_blocklist = frozenset(
        ["any", "all", "rank", "count", "size", "idxmin", "idxmax"]
    )

    def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
        self.kind = kind
        self.how = how
        self.has_dropped_na = has_dropped_na

    # Map of op name -> libgroupby function name (or a partial that already
    # binds keyword arguments), split by aggregation vs transform.
    _CYTHON_FUNCTIONS: dict[str, dict] = {
        "aggregate": {
            "any": functools.partial(libgroupby.group_any_all, val_test="any"),
            "all": functools.partial(libgroupby.group_any_all, val_test="all"),
            "sum": "group_sum",
            "prod": "group_prod",
            "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"),
            "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"),
            "min": "group_min",
            "max": "group_max",
            "mean": "group_mean",
            "median": "group_median_float64",
            "var": "group_var",
            "std": functools.partial(libgroupby.group_var, name="std"),
            "sem": functools.partial(libgroupby.group_var, name="sem"),
            "skew": "group_skew",
            "first": "group_nth",
            "last": "group_last",
            "ohlc": "group_ohlc",
        },
        "transform": {
            "cumprod": "group_cumprod",
            "cumsum": "group_cumsum",
            "cummin": "group_cummin",
            "cummax": "group_cummax",
            "rank": "group_rank",
        },
    }

    _cython_arity = {"ohlc": 4}  # OHLC produces 4 output columns per group

    @classmethod
    def get_kind_from_how(cls, how: str) -> str:
        # Ops not listed under "aggregate" are assumed to be transforms.
        if how in cls._CYTHON_FUNCTIONS["aggregate"]:
            return "aggregate"
        return "transform"

    # Note: we make this a classmethod and pass kind+how so that caching
    # works at the class level and not the instance level
    @classmethod
    @functools.cache
    def _get_cython_function(
        cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
    ):
        dtype_str = dtype.name
        ftype = cls._CYTHON_FUNCTIONS[kind][how]

        # see if there is a fused-type version of function
        # only valid for numeric
        if callable(ftype):
            f = ftype
        else:
            f = getattr(libgroupby, ftype)
        if is_numeric:
            return f
        elif dtype == np.dtype(object):
            if how in ["median", "cumprod"]:
                # no fused types -> no __signatures__
                raise NotImplementedError(
                    f"function is not implemented for this dtype: "
                    f"[how->{how},dtype->{dtype_str}]"
                )
            elif how in ["std", "sem", "idxmin", "idxmax"]:
                # We have a partial object that does not have __signatures__
                return f
            elif how == "skew":
                # _get_cython_vals will convert to float64
                pass
            elif "object" not in f.__signatures__:
                # raise NotImplementedError here rather than TypeError later
                raise NotImplementedError(
                    f"function is not implemented for this dtype: "
                    f"[how->{how},dtype->{dtype_str}]"
                )
            return f
        else:
            raise NotImplementedError(
                "This should not be reached. Please report a bug at "
                "github.com/pandas-dev/pandas/",
                dtype,
            )

    def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
        """
        Cast numeric dtypes to float64 for functions that only support that.

        Parameters
        ----------
        values : np.ndarray

        Returns
        -------
        values : np.ndarray
        """
        how = self.how

        if how in ["median", "std", "sem", "skew"]:
            # median only has a float64 implementation
            # We should only get here with is_numeric, as non-numeric cases
            # should raise in _get_cython_function
            values = ensure_float64(values)

        elif values.dtype.kind in "iu":
            if how in ["var", "mean"] or (
                self.kind == "transform" and self.has_dropped_na
            ):
                # has_dropped_na check need for test_null_group_str_transformer
                # result may still include NaN, so we have to cast
                values = ensure_float64(values)

            elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]:
                # Avoid overflow during group op
                if values.dtype.kind == "i":
                    values = ensure_int64(values)
                else:
                    values = ensure_uint64(values)

        return values

    def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
        # Aggregations collapse to one row per group; transforms keep the
        # input shape; ohlc is the only multi-column (arity > 1) op.
        how = self.how
        kind = self.kind

        arity = self._cython_arity.get(how, 1)

        out_shape: Shape
        if how == "ohlc":
            out_shape = (ngroups, arity)
        elif arity > 1:
            raise NotImplementedError(
                "arity of more than 1 is not supported for the 'how' argument"
            )
        elif kind == "transform":
            out_shape = values.shape
        else:
            out_shape = (ngroups,) + values.shape[1:]
        return out_shape

    def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:
        how = self.how

        if how == "rank":
            out_dtype = "float64"
        elif how in ["idxmin", "idxmax"]:
            # The Cython implementation only produces the row number; we'll take
            # from the index using this in post processing
            out_dtype = "intp"
        else:
            if dtype.kind in "iufcb":
                out_dtype = f"{dtype.kind}{dtype.itemsize}"
            else:
                out_dtype = "object"
        return np.dtype(out_dtype)

    def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
        """
        Get the desired dtype of a result based on the
        input dtype and how it was computed.

        Parameters
        ----------
        dtype : np.dtype

        Returns
        -------
        np.dtype
            The desired dtype of the result.
        """
        how = self.how

        # NOTE(review): "sum" appears twice in this list; harmless but redundant.
        if how in ["sum", "cumsum", "sum", "prod", "cumprod"]:
            if dtype == np.dtype(bool):
                return np.dtype(np.int64)
        elif how in ["mean", "median", "var", "std", "sem"]:
            if dtype.kind in "fc":
                return dtype
            elif dtype.kind in "iub":
                return np.dtype(np.float64)
        return dtype

    @final
    def _cython_op_ndim_compat(
        self,
        values: np.ndarray,
        *,
        min_count: int,
        ngroups: int,
        comp_ids: np.ndarray,
        mask: npt.NDArray[np.bool_] | None = None,
        result_mask: npt.NDArray[np.bool_] | None = None,
        **kwargs,
    ) -> np.ndarray:
        # The cython kernels expect 2D inputs; adapt 1D input transparently.
        if values.ndim == 1:
            # expand to 2d, dispatch, then squeeze if appropriate
            values2d = values[None, :]
            if mask is not None:
                mask = mask[None, :]
            if result_mask is not None:
                result_mask = result_mask[None, :]
            res = self._call_cython_op(
                values2d,
                min_count=min_count,
                ngroups=ngroups,
                comp_ids=comp_ids,
                mask=mask,
                result_mask=result_mask,
                **kwargs,
            )
            if res.shape[0] == 1:
                return res[0]

            # otherwise we have OHLC
            return res.T

        return self._call_cython_op(
            values,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=comp_ids,
            mask=mask,
            result_mask=result_mask,
            **kwargs,
        )

    @final
    def _call_cython_op(
        self,
        values: np.ndarray,  # np.ndarray[ndim=2]
        *,
        min_count: int,
        ngroups: int,
        comp_ids: np.ndarray,
        mask: npt.NDArray[np.bool_] | None,
        result_mask: npt.NDArray[np.bool_] | None,
        **kwargs,
    ) -> np.ndarray:  # np.ndarray[ndim=2]
        orig_values = values

        dtype = values.dtype
        is_numeric = dtype.kind in "iufcb"

        is_datetimelike = dtype.kind in "mM"

        # Pre-process: view datetimelike/bool data as integers so the cython
        # kernels can operate on them uniformly.
        if is_datetimelike:
            values = values.view("int64")
            is_numeric = True
        elif dtype.kind == "b":
            values = values.view("uint8")
        if values.dtype == "float16":
            values = values.astype(np.float32)

        if self.how in ["any", "all"]:
            if mask is None:
                mask = isna(values)
            if dtype == object:
                if kwargs["skipna"]:
                    # GH#37501: don't raise on pd.NA when skipna=True
                    if mask.any():
                        # mask on original values computed separately
                        values = values.copy()
                        values[mask] = True
            values = values.astype(bool, copy=False).view(np.int8)
            is_numeric = True

        # Kernels operate column-major; transpose in and back out below.
        values = values.T
        if mask is not None:
            mask = mask.T
            if result_mask is not None:
                result_mask = result_mask.T

        out_shape = self._get_output_shape(ngroups, values)
        func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
        values = self._get_cython_vals(values)
        out_dtype = self._get_out_dtype(values.dtype)

        result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
        if self.kind == "aggregate":
            counts = np.zeros(ngroups, dtype=np.int64)
            if self.how in [
                "idxmin",
                "idxmax",
                "min",
                "max",
                "mean",
                "last",
                "first",
                "sum",
            ]:
                func(
                    out=result,
                    counts=counts,
                    values=values,
                    labels=comp_ids,
                    min_count=min_count,
                    mask=mask,
                    result_mask=result_mask,
                    is_datetimelike=is_datetimelike,
                    **kwargs,
                )
            elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]:
                if self.how in ["std", "sem"]:
                    kwargs["is_datetimelike"] = is_datetimelike
                func(
                    result,
                    counts,
                    values,
                    comp_ids,
                    min_count=min_count,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
            elif self.how in ["any", "all"]:
                func(
                    out=result,
                    values=values,
                    labels=comp_ids,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
                result = result.astype(bool, copy=False)
            elif self.how in ["skew"]:
                func(
                    out=result,
                    counts=counts,
                    values=values,
                    labels=comp_ids,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
                if dtype == object:
                    result = result.astype(object)

            else:
                raise NotImplementedError(f"{self.how} is not implemented")
        else:
            # TODO: min_count
            if self.how != "rank":
                # TODO: should rank take result_mask?
                kwargs["result_mask"] = result_mask
            func(
                out=result,
                values=values,
                labels=comp_ids,
                ngroups=ngroups,
                is_datetimelike=is_datetimelike,
                mask=mask,
                **kwargs,
            )

        if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]:
            # i.e. counts is defined. Locations where count<min_count
            # need to have the result set to np.nan, which may require casting,
            # see GH#40767. For idxmin/idxmax is handled specially via post-processing
            if result.dtype.kind in "iu" and not is_datetimelike:
                # if the op keeps the int dtypes, we have to use 0
                cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)
                empty_groups = counts < cutoff
                if empty_groups.any():
                    if result_mask is not None:
                        assert result_mask[empty_groups].all()
                    else:
                        # Note: this conversion could be lossy, see GH#40767
                        result = result.astype("float64")
                        result[empty_groups] = np.nan

        result = result.T

        if self.how not in self.cast_blocklist:
            # e.g. if we are int64 and need to restore to datetime64/timedelta64
            # "rank" is the only member of cast_blocklist we get here
            # Casting only needed for float16, bool, datetimelike,
            # and self.how in ["sum", "prod", "ohlc", "cumprod"]
            res_dtype = self._get_result_dtype(orig_values.dtype)
            op_result = maybe_downcast_to_dtype(result, res_dtype)
        else:
            op_result = result

        return op_result

    @final
    def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:
        if values.ndim > 2:
            raise NotImplementedError("number of dimensions is currently limited to 2")
        if values.ndim == 2:
            assert axis == 1, axis
        elif not is_1d_only_ea_dtype(values.dtype):
            # Note: it is *not* the case that axis is always 0 for 1-dim values,
            # as we can have 1D ExtensionArrays that we need to treat as 2D
            assert axis == 0

    @final
    def cython_operation(
        self,
        *,
        values: ArrayLike,
        axis: AxisInt,
        min_count: int = -1,
        comp_ids: np.ndarray,
        ngroups: int,
        **kwargs,
    ) -> ArrayLike:
        """
        Call our cython function, with appropriate pre- and post- processing.
        """
        self._validate_axis(axis, values)

        if not isinstance(values, np.ndarray):
            # i.e. ExtensionArray: delegate to the array's own groupby hook.
            return values._groupby_op(
                how=self.how,
                has_dropped_na=self.has_dropped_na,
                min_count=min_count,
                ngroups=ngroups,
                ids=comp_ids,
                **kwargs,
            )

        return self._cython_op_ndim_compat(
            values,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=comp_ids,
            mask=None,
            **kwargs,
        )
558
+
559
+
560
+ class BaseGrouper:
561
+ """
562
+ This is an internal Grouper class, which actually holds
563
+ the generated groups
564
+
565
+ Parameters
566
+ ----------
567
+ axis : Index
568
+ groupings : Sequence[Grouping]
569
+ all the grouping instances to handle in this grouper
570
+ for example for grouper list to groupby, need to pass the list
571
+ sort : bool, default True
572
+ whether this grouper will give sorted result or not
573
+
574
+ """
575
+
576
+ axis: Index
577
+
578
+ def __init__(
579
+ self,
580
+ axis: Index,
581
+ groupings: Sequence[grouper.Grouping],
582
+ sort: bool = True,
583
+ dropna: bool = True,
584
+ ) -> None:
585
+ assert isinstance(axis, Index), axis
586
+
587
+ self.axis = axis
588
+ self._groupings: list[grouper.Grouping] = list(groupings)
589
+ self._sort = sort
590
+ self.dropna = dropna
591
+
592
+ @property
593
+ def groupings(self) -> list[grouper.Grouping]:
594
+ return self._groupings
595
+
596
+ @property
597
+ def shape(self) -> Shape:
598
+ return tuple(ping.ngroups for ping in self.groupings)
599
+
600
+ def __iter__(self) -> Iterator[Hashable]:
601
+ return iter(self.indices)
602
+
603
+ @property
604
+ def nkeys(self) -> int:
605
+ return len(self.groupings)
606
+
607
+ def get_iterator(
608
+ self, data: NDFrameT, axis: AxisInt = 0
609
+ ) -> Iterator[tuple[Hashable, NDFrameT]]:
610
+ """
611
+ Groupby iterator
612
+
613
+ Returns
614
+ -------
615
+ Generator yielding sequence of (name, subsetted object)
616
+ for each group
617
+ """
618
+ splitter = self._get_splitter(data, axis=axis)
619
+ keys = self.group_keys_seq
620
+ yield from zip(keys, splitter)
621
+
622
+ @final
623
+ def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
624
+ """
625
+ Returns
626
+ -------
627
+ Generator yielding subsetted objects
628
+ """
629
+ ids, _, ngroups = self.group_info
630
+ return _get_splitter(
631
+ data,
632
+ ids,
633
+ ngroups,
634
+ sorted_ids=self._sorted_ids,
635
+ sort_idx=self._sort_idx,
636
+ axis=axis,
637
+ )
638
+
639
+ @final
640
+ @cache_readonly
641
+ def group_keys_seq(self):
642
+ if len(self.groupings) == 1:
643
+ return self.levels[0]
644
+ else:
645
+ ids, _, ngroups = self.group_info
646
+
647
+ # provide "flattened" iterator for multi-group setting
648
+ return get_flattened_list(ids, ngroups, self.levels, self.codes)
649
+
650
+ @cache_readonly
651
+ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
652
+ """dict {group name -> group indices}"""
653
+ if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
654
+ # This shows unused categories in indices GH#38642
655
+ return self.groupings[0].indices
656
+ codes_list = [ping.codes for ping in self.groupings]
657
+ keys = [ping._group_index for ping in self.groupings]
658
+ return get_indexer_dict(codes_list, keys)
659
+
660
    @final
    def result_ilocs(self) -> npt.NDArray[np.intp]:
        """
        Get the original integer locations of result_index in the input.
        """
        # Original indices are where group_index would go via sorting.
        # But when dropna is true, we need to remove null values while accounting for
        # any gaps that then occur because of them.
        group_index = get_group_index(
            self.codes, self.shape, sort=self._sort, xnull=True
        )
        group_index, _ = compress_group_index(group_index, sort=self._sort)

        if self.has_dropped_na:
            # keep only positions whose group survived the dropna
            mask = np.where(group_index >= 0)
            # Count how many gaps are caused by previous null values for each position
            null_gaps = np.cumsum(group_index == -1)[mask]
            group_index = group_index[mask]

        result = get_group_index_sorter(group_index, self.ngroups)

        if self.has_dropped_na:
            # Shift by the number of prior null gaps
            result += np.take(null_gaps, result)

        return result
686
+
687
+ @final
688
+ @property
689
+ def codes(self) -> list[npt.NDArray[np.signedinteger]]:
690
+ return [ping.codes for ping in self.groupings]
691
+
692
+ @property
693
+ def levels(self) -> list[Index]:
694
+ return [ping._group_index for ping in self.groupings]
695
+
696
+ @property
697
+ def names(self) -> list[Hashable]:
698
+ return [ping.name for ping in self.groupings]
699
+
700
+ @final
701
+ def size(self) -> Series:
702
+ """
703
+ Compute group sizes.
704
+ """
705
+ ids, _, ngroups = self.group_info
706
+ out: np.ndarray | list
707
+ if ngroups:
708
+ out = np.bincount(ids[ids != -1], minlength=ngroups)
709
+ else:
710
+ out = []
711
+ return Series(out, index=self.result_index, dtype="int64", copy=False)
712
+
713
+ @cache_readonly
714
+ def groups(self) -> dict[Hashable, np.ndarray]:
715
+ """dict {group name -> group labels}"""
716
+ if len(self.groupings) == 1:
717
+ return self.groupings[0].groups
718
+ else:
719
+ to_groupby = []
720
+ for ping in self.groupings:
721
+ gv = ping.grouping_vector
722
+ if not isinstance(gv, BaseGrouper):
723
+ to_groupby.append(gv)
724
+ else:
725
+ to_groupby.append(gv.groupings[0].grouping_vector)
726
+ index = MultiIndex.from_arrays(to_groupby)
727
+ return self.axis.groupby(index)
728
+
729
+ @final
730
+ @cache_readonly
731
+ def is_monotonic(self) -> bool:
732
+ # return if my group orderings are monotonic
733
+ return Index(self.group_info[0]).is_monotonic_increasing
734
+
735
+ @final
736
+ @cache_readonly
737
+ def has_dropped_na(self) -> bool:
738
+ """
739
+ Whether grouper has null value(s) that are dropped.
740
+ """
741
+ return bool((self.group_info[0] < 0).any())
742
+
743
+ @cache_readonly
744
+ def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
745
+ comp_ids, obs_group_ids = self._get_compressed_codes()
746
+
747
+ ngroups = len(obs_group_ids)
748
+ comp_ids = ensure_platform_int(comp_ids)
749
+
750
+ return comp_ids, obs_group_ids, ngroups
751
+
752
+ @cache_readonly
753
+ def codes_info(self) -> npt.NDArray[np.intp]:
754
+ # return the codes of items in original grouped axis
755
+ ids, _, _ = self.group_info
756
+ return ids
757
+
758
+ @final
759
+ def _get_compressed_codes(
760
+ self,
761
+ ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]:
762
+ # The first returned ndarray may have any signed integer dtype
763
+ if len(self.groupings) > 1:
764
+ group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
765
+ return compress_group_index(group_index, sort=self._sort)
766
+ # FIXME: compress_group_index's second return value is int64, not intp
767
+
768
+ ping = self.groupings[0]
769
+ return ping.codes, np.arange(len(ping._group_index), dtype=np.intp)
770
+
771
+ @final
772
+ @cache_readonly
773
+ def ngroups(self) -> int:
774
+ return len(self.result_index)
775
+
776
+ @property
777
+ def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
778
+ codes = self.codes
779
+ ids, obs_ids, _ = self.group_info
780
+ return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
781
+
782
+ @cache_readonly
783
+ def result_index(self) -> Index:
784
+ if len(self.groupings) == 1:
785
+ return self.groupings[0]._result_index.rename(self.names[0])
786
+
787
+ codes = self.reconstructed_codes
788
+ levels = [ping._result_index for ping in self.groupings]
789
+ return MultiIndex(
790
+ levels=levels, codes=codes, verify_integrity=False, names=self.names
791
+ )
792
+
793
+ @final
794
+ def get_group_levels(self) -> list[ArrayLike]:
795
+ # Note: only called from _insert_inaxis_grouper, which
796
+ # is only called for BaseGrouper, never for BinGrouper
797
+ if len(self.groupings) == 1:
798
+ return [self.groupings[0]._group_arraylike]
799
+
800
+ name_list = []
801
+ for ping, codes in zip(self.groupings, self.reconstructed_codes):
802
+ codes = ensure_platform_int(codes)
803
+ levels = ping._group_arraylike.take(codes)
804
+
805
+ name_list.append(levels)
806
+
807
+ return name_list
808
+
809
+ # ------------------------------------------------------------
810
+ # Aggregation functions
811
+
812
+ @final
813
+ def _cython_operation(
814
+ self,
815
+ kind: str,
816
+ values,
817
+ how: str,
818
+ axis: AxisInt,
819
+ min_count: int = -1,
820
+ **kwargs,
821
+ ) -> ArrayLike:
822
+ """
823
+ Returns the values of a cython operation.
824
+ """
825
+ assert kind in ["transform", "aggregate"]
826
+
827
+ cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)
828
+
829
+ ids, _, _ = self.group_info
830
+ ngroups = self.ngroups
831
+ return cy_op.cython_operation(
832
+ values=values,
833
+ axis=axis,
834
+ min_count=min_count,
835
+ comp_ids=ids,
836
+ ngroups=ngroups,
837
+ **kwargs,
838
+ )
839
+
840
+ @final
841
+ def agg_series(
842
+ self, obj: Series, func: Callable, preserve_dtype: bool = False
843
+ ) -> ArrayLike:
844
+ """
845
+ Parameters
846
+ ----------
847
+ obj : Series
848
+ func : function taking a Series and returning a scalar-like
849
+ preserve_dtype : bool
850
+ Whether the aggregation is known to be dtype-preserving.
851
+
852
+ Returns
853
+ -------
854
+ np.ndarray or ExtensionArray
855
+ """
856
+
857
+ if not isinstance(obj._values, np.ndarray):
858
+ # we can preserve a little bit more aggressively with EA dtype
859
+ # because maybe_cast_pointwise_result will do a try/except
860
+ # with _from_sequence. NB we are assuming here that _from_sequence
861
+ # is sufficiently strict that it casts appropriately.
862
+ preserve_dtype = True
863
+
864
+ result = self._aggregate_series_pure_python(obj, func)
865
+
866
+ npvalues = lib.maybe_convert_objects(result, try_float=False)
867
+ if preserve_dtype:
868
+ out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
869
+ else:
870
+ out = npvalues
871
+ return out
872
+
873
    @final
    def _aggregate_series_pure_python(
        self, obj: Series, func: Callable
    ) -> npt.NDArray[np.object_]:
        """Apply ``func`` to each group in Python; return object-dtype results."""
        _, _, ngroups = self.group_info

        result = np.empty(ngroups, dtype="O")
        initialized = False

        splitter = self._get_splitter(obj, axis=0)

        for i, group in enumerate(splitter):
            res = func(group)
            # unwrap single-element containers returned by ``func``
            res = extract_result(res)

            if not initialized:
                # We only do this validation on the first iteration
                check_result_array(res, group.dtype)
                initialized = True

            result[i] = res

        return result
896
+
897
    @final
    def apply_groupwise(
        self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0
    ) -> tuple[list, bool]:
        """
        Apply ``f`` to each group of ``data``.

        Returns
        -------
        tuple of (list of per-group results, whether any group was mutated
        by ``f``, detected via a change in the group's axes).
        """
        mutated = False
        splitter = self._get_splitter(data, axis=axis)
        group_keys = self.group_keys_seq
        result_values = []

        # This calls DataSplitter.__iter__
        zipped = zip(group_keys, splitter)

        for key, group in zipped:
            # Pinning name is needed for
            # test_group_apply_once_per_group,
            # test_inconsistent_return_type, test_set_group_name,
            # test_group_name_available_in_inference_pass,
            # test_groupby_multi_timezone
            object.__setattr__(group, "name", key)

            # group might be modified
            group_axes = group.axes
            res = f(group)
            if not mutated and not _is_indexed_like(res, group_axes, axis):
                mutated = True
            result_values.append(res)
        # getattr pattern for __name__ is needed for functools.partial objects
        if len(group_keys) == 0 and getattr(f, "__name__", None) in [
            "skew",
            "sum",
            "prod",
        ]:
            # If group_keys is empty, then no function calls have been made,
            # so we will not have raised even if this is an invalid dtype.
            # So do one dummy call here to raise appropriate TypeError.
            f(data.iloc[:0])

        return result_values, mutated
935
+
936
+ # ------------------------------------------------------------
937
+ # Methods for sorting subsets of our GroupBy's object
938
+
939
+ @final
940
+ @cache_readonly
941
+ def _sort_idx(self) -> npt.NDArray[np.intp]:
942
+ # Counting sort indexer
943
+ ids, _, ngroups = self.group_info
944
+ return get_group_index_sorter(ids, ngroups)
945
+
946
+ @final
947
+ @cache_readonly
948
+ def _sorted_ids(self) -> npt.NDArray[np.intp]:
949
+ ids, _, _ = self.group_info
950
+ return ids.take(self._sort_idx)
951
+
952
+
953
class BinGrouper(BaseGrouper):
    """
    This is an internal Grouper class

    Parameters
    ----------
    bins : the split index of binlabels to group the item of axis
    binlabels : the label list
    indexer : np.ndarray[np.intp], optional
        the indexer created by Grouper
        some groupers (TimeGrouper) will sort its axis and its
        group_info is also sorted, so need the indexer to reorder

    Examples
    --------
    bins: [2, 4, 6, 8, 10]
    binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
        '2005-01-05', '2005-01-07', '2005-01-09'],
        dtype='datetime64[ns]', freq='2D')

    the group_info, which contains the label of each item in grouped
    axis, the index of label in label list, group number, is

    (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)

    means that, the grouped axis has 10 items, can be grouped into 5
    labels, the first and second items belong to the first label, the
    third and forth items belong to the second label, and so on

    """

    # bins[i] is the (exclusive) end position of group i; binlabels[i] its label
    bins: npt.NDArray[np.int64]
    binlabels: Index

    def __init__(
        self,
        bins,
        binlabels,
        indexer=None,
    ) -> None:
        self.bins = ensure_int64(bins)
        self.binlabels = ensure_index(binlabels)
        self.indexer = indexer

        # These lengths must match, otherwise we could call agg_series
        # with empty self.bins, which would raise later.
        assert len(self.binlabels) == len(self.bins)

    @cache_readonly
    def groups(self):
        """dict {group name -> group labels}"""
        # this is mainly for compat
        # GH 3881
        result = {
            key: value
            for key, value in zip(self.binlabels, self.bins)
            if key is not NaT
        }
        return result

    @property
    def nkeys(self) -> int:
        # still matches len(self.groupings), but we can hard-code
        return 1

    @cache_readonly
    def codes_info(self) -> npt.NDArray[np.intp]:
        # return the codes of items in original grouped axis
        ids, _, _ = self.group_info
        if self.indexer is not None:
            # undo the sort performed by the grouper so the codes line up
            # with the original (pre-sort) axis order
            sorter = np.lexsort((ids, self.indexer))
            ids = ids[sorter]
        return ids

    def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if axis == 0:
            slicer = lambda start, edge: data.iloc[start:edge]
        else:
            slicer = lambda start, edge: data.iloc[:, start:edge]

        length = len(data.axes[axis])

        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            if label is not NaT:
                yield label, slicer(start, edge)
            start = edge

        if start < length:
            # rows past the last bin edge fall into the last label
            yield self.binlabels[-1], slicer(start, None)

    @cache_readonly
    def indices(self):
        # dict {group name -> positional indices of that group's rows};
        # NaT-labelled bins are skipped
        indices = collections.defaultdict(list)

        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not NaT:
                    indices[label] = list(range(i, bin))
                i = bin
        return indices

    @cache_readonly
    def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
        ngroups = self.ngroups
        obs_group_ids = np.arange(ngroups, dtype=np.intp)
        # number of rows in each bin
        rep = np.diff(np.r_[0, self.bins])

        rep = ensure_platform_int(rep)
        if ngroups == len(self.bins):
            comp_ids = np.repeat(np.arange(ngroups), rep)
        else:
            # a leading NaT bin was dropped from result_index; mark its rows -1
            comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)

        return (
            ensure_platform_int(comp_ids),
            obs_group_ids,
            ngroups,
        )

    @cache_readonly
    def reconstructed_codes(self) -> list[np.ndarray]:
        # get unique result indices, and prepend 0 as groupby starts from the first
        return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]

    @cache_readonly
    def result_index(self) -> Index:
        if len(self.binlabels) != 0 and isna(self.binlabels[0]):
            # leading NaT bin is excluded from the result
            return self.binlabels[1:]

        return self.binlabels

    @property
    def levels(self) -> list[Index]:
        return [self.binlabels]

    @property
    def names(self) -> list[Hashable]:
        return [self.binlabels.name]

    @property
    def groupings(self) -> list[grouper.Grouping]:
        # synthesize a single Grouping over the bin labels for compatibility
        # with the BaseGrouper interface
        lev = self.binlabels
        codes = self.group_info[0]
        labels = lev.take(codes)
        ping = grouper.Grouping(
            labels, labels, in_axis=False, level=None, uniques=lev._values
        )
        return [ping]
1111
+
1112
+
1113
def _is_indexed_like(obj, axes, axis: AxisInt) -> bool:
    """Return True if ``obj``'s index along ``axis`` equals ``axes[axis]``."""
    if isinstance(obj, Series):
        # a Series result can only match a single-axis template
        return len(axes) == 1 and obj.axes[axis].equals(axes[axis])
    if isinstance(obj, DataFrame):
        return obj.axes[axis].equals(axes[axis])

    return False
1122
+
1123
+
1124
+ # ----------------------------------------------------------------------
1125
+ # Splitting / application
1126
+
1127
+
1128
class DataSplitter(Generic[NDFrameT]):
    """Yield per-group subsets of an NDFrame, in group-id order."""

    def __init__(
        self,
        data: NDFrameT,
        labels: npt.NDArray[np.intp],
        ngroups: int,
        *,
        sort_idx: npt.NDArray[np.intp],
        sorted_ids: npt.NDArray[np.intp],
        axis: AxisInt = 0,
    ) -> None:
        self.data = data
        self.labels = ensure_platform_int(labels)  # _should_ already be np.intp
        self.ngroups = ngroups

        self._slabels = sorted_ids
        self._sort_idx = sort_idx

        self.axis = axis
        assert isinstance(axis, int), axis

    def __iter__(self) -> Iterator:
        sdata = self._sorted_data

        if self.ngroups == 0:
            # we are inside a generator, rather than raise StopIteration
            # we merely return signal the end
            return

        # contiguous [start, end) slices of the sorted data, one per group
        starts, ends = lib.generate_slices(self._slabels, self.ngroups)
        for begin, stop in zip(starts, ends):
            yield self._chop(sdata, slice(begin, stop))

    @cache_readonly
    def _sorted_data(self) -> NDFrameT:
        return self.data.take(self._sort_idx, axis=self.axis)

    def _chop(self, sdata, slice_obj: slice) -> NDFrame:
        raise AbstractMethodError(self)
1168
+
1169
+
1170
class SeriesSplitter(DataSplitter):
    def _chop(self, sdata: Series, slice_obj: slice) -> Series:
        # fastpath equivalent to `sdata.iloc[slice_obj]`
        mgr = sdata._mgr.get_slice(slice_obj)
        out = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
        out._name = sdata.name
        return out.__finalize__(sdata, method="groupby")
1177
+
1178
+
1179
class FrameSplitter(DataSplitter):
    def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
        # Fastpath equivalent to:
        # if self.axis == 0:
        #     return sdata.iloc[slice_obj]
        # else:
        #     return sdata.iloc[:, slice_obj]
        mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
        out = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
        return out.__finalize__(sdata, method="groupby")
1189
+
1190
+
1191
def _get_splitter(
    data: NDFrame,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    *,
    sort_idx: npt.NDArray[np.intp],
    sorted_ids: npt.NDArray[np.intp],
    axis: AxisInt = 0,
) -> DataSplitter:
    """Return the DataSplitter subclass appropriate for ``data``."""
    klass: type[DataSplitter]
    if isinstance(data, Series):
        klass = SeriesSplitter
    else:
        # i.e. DataFrame
        klass = FrameSplitter

    return klass(
        data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis
    )
videollama2/lib/python3.10/site-packages/pandas/core/indexers/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Re-export the low-dependency indexing helpers so they are importable
# directly from ``pandas.core.indexers``.
from pandas.core.indexers.utils import (
    check_array_indexer,
    check_key_length,
    check_setitem_lengths,
    disallow_ndim_indexing,
    is_empty_indexer,
    is_list_like_indexer,
    is_scalar_indexer,
    is_valid_positional_slice,
    length_of_indexer,
    maybe_convert_indices,
    unpack_1tuple,
    unpack_tuple_and_ellipses,
    validate_indices,
)

# Public API of this subpackage.
__all__ = [
    "is_valid_positional_slice",
    "is_list_like_indexer",
    "is_scalar_indexer",
    "is_empty_indexer",
    "check_setitem_lengths",
    "validate_indices",
    "maybe_convert_indices",
    "length_of_indexer",
    "disallow_ndim_indexing",
    "unpack_1tuple",
    "check_key_length",
    "check_array_indexer",
    "unpack_tuple_and_ellipses",
]
videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (694 Bytes). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
videollama2/lib/python3.10/site-packages/pandas/core/indexers/objects.py ADDED
@@ -0,0 +1,453 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Indexer objects for computing start/end window bounds for rolling operations"""
2
+ from __future__ import annotations
3
+
4
+ from datetime import timedelta
5
+
6
+ import numpy as np
7
+
8
+ from pandas._libs.tslibs import BaseOffset
9
+ from pandas._libs.window.indexers import calculate_variable_window_bounds
10
+ from pandas.util._decorators import Appender
11
+
12
+ from pandas.core.dtypes.common import ensure_platform_int
13
+
14
+ from pandas.core.indexes.datetimes import DatetimeIndex
15
+
16
+ from pandas.tseries.offsets import Nano
17
+
18
+ get_window_bounds_doc = """
19
+ Computes the bounds of a window.
20
+
21
+ Parameters
22
+ ----------
23
+ num_values : int, default 0
24
+ number of values that will be aggregated over
25
+ window_size : int, default 0
26
+ the number of rows in a window
27
+ min_periods : int, default None
28
+ min_periods passed from the top level rolling API
29
+ center : bool, default None
30
+ center passed from the top level rolling API
31
+ closed : str, default None
32
+ closed passed from the top level rolling API
33
+ step : int, default None
34
+ step passed from the top level rolling API
35
+ .. versionadded:: 1.5
36
+ win_type : str, default None
37
+ win_type passed from the top level rolling API
38
+
39
+ Returns
40
+ -------
41
+ A tuple of ndarray[int64]s, indicating the boundaries of each
42
+ window
43
+ """
44
+
45
+
46
class BaseIndexer:
    """
    Base class for window bounds calculations.

    Examples
    --------
    >>> from pandas.api.indexers import BaseIndexer
    >>> class CustomIndexer(BaseIndexer):
    ...     def get_window_bounds(self, num_values, min_periods, center, closed, step):
    ...         start = np.empty(num_values, dtype=np.int64)
    ...         end = np.empty(num_values, dtype=np.int64)
    ...         for i in range(num_values):
    ...             start[i] = i
    ...             end[i] = i + self.window_size
    ...         return start, end
    >>> df = pd.DataFrame({"values": range(5)})
    >>> indexer = CustomIndexer(window_size=2)
    >>> df.rolling(indexer).sum()
        values
    0	1.0
    1	3.0
    2	5.0
    3	7.0
    4	4.0
    """

    def __init__(
        self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs
    ) -> None:
        self.index_array = index_array
        self.window_size = window_size
        # Set user defined kwargs as attributes that can be used in get_window_bounds
        for key, value in kwargs.items():
            setattr(self, key, value)

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Subclasses must implement the actual bounds computation.
        raise NotImplementedError
91
+
92
+
93
class FixedWindowIndexer(BaseIndexer):
    """Creates window boundaries that are of fixed length."""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # When centering (or for a degenerate zero-size window) shift the
        # window endpoints forward by half the window size.
        if center or self.window_size == 0:
            offset = (self.window_size - 1) // 2
        else:
            offset = 0

        end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64")
        start = end - self.window_size
        if closed in ["left", "both"]:
            start -= 1
        if closed in ["left", "neither"]:
            end -= 1

        return np.clip(start, 0, num_values), np.clip(end, 0, num_values)
121
+
122
+
123
class VariableWindowIndexer(BaseIndexer):
    """Creates window boundaries that are of variable length, namely for time series."""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Delegates entirely to the cython implementation, which sizes each
        # window from the (time-like) index values in ``self.index_array``.
        # error: Argument 4 to "calculate_variable_window_bounds" has incompatible
        # type "Optional[bool]"; expected "bool"
        # error: Argument 6 to "calculate_variable_window_bounds" has incompatible
        # type "Optional[ndarray]"; expected "ndarray"
        return calculate_variable_window_bounds(
            num_values,
            self.window_size,
            min_periods,
            center,  # type: ignore[arg-type]
            closed,
            self.index_array,  # type: ignore[arg-type]
        )
147
+
148
+
149
class VariableOffsetWindowIndexer(BaseIndexer):
    """
    Calculate window boundaries based on a non-fixed offset such as a BusinessDay.

    Examples
    --------
    >>> from pandas.api.indexers import VariableOffsetWindowIndexer
    >>> df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
    >>> offset = pd.offsets.BDay(1)
    >>> indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
    >>> df
                0
    2020-01-01  0
    2020-01-02  1
    2020-01-03  2
    2020-01-04  3
    2020-01-05  4
    2020-01-06  5
    2020-01-07  6
    2020-01-08  7
    2020-01-09  8
    2020-01-10  9
    >>> df.rolling(indexer).sum()
                   0
    2020-01-01   0.0
    2020-01-02   1.0
    2020-01-03   2.0
    2020-01-04   3.0
    2020-01-05   7.0
    2020-01-06  12.0
    2020-01-07   6.0
    2020-01-08   7.0
    2020-01-09   8.0
    2020-01-10   9.0
    """

    def __init__(
        self,
        index_array: np.ndarray | None = None,
        window_size: int = 0,
        index: DatetimeIndex | None = None,
        offset: BaseOffset | None = None,
        **kwargs,
    ) -> None:
        super().__init__(index_array, window_size, **kwargs)
        if not isinstance(index, DatetimeIndex):
            raise ValueError("index must be a DatetimeIndex.")
        self.index = index
        if not isinstance(offset, BaseOffset):
            raise ValueError("offset must be a DateOffset-like object.")
        self.offset = offset

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for variable offset window")
        if num_values <= 0:
            return np.empty(0, dtype="int64"), np.empty(0, dtype="int64")

        # if windows is variable, default is 'right', otherwise default is 'both'
        if closed is None:
            closed = "right" if self.index is not None else "both"

        right_closed = closed in ["right", "both"]
        left_closed = closed in ["left", "both"]

        # handle a monotonically decreasing index by flipping the offset sign
        if self.index[num_values - 1] < self.index[0]:
            index_growth_sign = -1
        else:
            index_growth_sign = 1
        offset_diff = index_growth_sign * self.offset

        start = np.empty(num_values, dtype="int64")
        start.fill(-1)
        end = np.empty(num_values, dtype="int64")
        end.fill(-1)

        start[0] = 0

        # right endpoint is closed
        if right_closed:
            end[0] = 1
        # right endpoint is open
        else:
            end[0] = 0

        zero = timedelta(0)
        # start is start of slice interval (including)
        # end is end of slice interval (not including)
        for i in range(1, num_values):
            end_bound = self.index[i]
            start_bound = end_bound - offset_diff

            # left endpoint is closed
            if left_closed:
                start_bound -= Nano(1)

            # advance the start bound until we are
            # within the constraint
            start[i] = i
            for j in range(start[i - 1], i):
                start_diff = (self.index[j] - start_bound) * index_growth_sign
                if start_diff > zero:
                    start[i] = j
                    break

            # end bound is previous end
            # or current index
            end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign
            if end_diff == zero and not right_closed:
                end[i] = end[i - 1] + 1
            elif end_diff <= zero:
                end[i] = i + 1
            else:
                end[i] = end[i - 1]

            # right endpoint is open
            if not right_closed:
                end[i] -= 1

        return start, end
277
+
278
+
279
class ExpandingIndexer(BaseIndexer):
    """Calculate expanding window bounds, mimicking df.expanding()"""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Every window begins at position 0 and ends just past the current row.
        starts = np.zeros(num_values, dtype=np.int64)
        ends = np.arange(1, num_values + 1, dtype=np.int64)
        return starts, ends
295
+
296
+
297
class FixedForwardWindowIndexer(BaseIndexer):
    """
    Creates window boundaries for fixed-length windows that include the current row.

    Examples
    --------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
    >>> df.rolling(window=indexer, min_periods=1).sum()
         B
    0  1.0
    1  3.0
    2  2.0
    3  4.0
    4  4.0
    """

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Forward-looking windows reject the center/closed options outright.
        if center:
            raise ValueError("Forward-looking windows can't have center=True")
        if closed is not None:
            raise ValueError(
                "Forward-looking windows don't support setting the closed argument"
            )
        step = 1 if step is None else step

        start = np.arange(0, num_values, step, dtype="int64")
        end = start + self.window_size
        if self.window_size:
            end = np.clip(end, 0, num_values)

        return start, end
346
+
347
+
348
class GroupbyIndexer(BaseIndexer):
    """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""

    def __init__(
        self,
        index_array: np.ndarray | None = None,
        window_size: int | BaseIndexer = 0,
        groupby_indices: dict | None = None,
        window_indexer: type[BaseIndexer] = BaseIndexer,
        indexer_kwargs: dict | None = None,
        **kwargs,
    ) -> None:
        """
        Parameters
        ----------
        index_array : np.ndarray or None
            np.ndarray of the index of the original object that we are performing
            a chained groupby operation over. This index has been pre-sorted relative to
            the groups
        window_size : int or BaseIndexer
            window size during the windowing operation
        groupby_indices : dict or None
            dict of {group label: [positional index of rows belonging to the group]}
        window_indexer : BaseIndexer
            BaseIndexer class determining the start and end bounds of each group
        indexer_kwargs : dict or None
            Custom kwargs to be passed to window_indexer
        **kwargs :
            keyword arguments that will be available when get_window_bounds is called
        """
        self.groupby_indices = groupby_indices or {}
        self.window_indexer = window_indexer
        # copy so that popping window_size below does not mutate the caller's dict
        self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}
        super().__init__(
            index_array=index_array,
            window_size=self.indexer_kwargs.pop("window_size", window_size),
            **kwargs,
        )

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # 1) For each group, get the indices that belong to the group
        # 2) Use the indices to calculate the start & end bounds of the window
        # 3) Append the window bounds in group order
        start_arrays = []
        end_arrays = []
        window_indices_start = 0
        for key, indices in self.groupby_indices.items():
            index_array: np.ndarray | None

            if self.index_array is not None:
                # restrict the index values to this group's rows
                index_array = self.index_array.take(ensure_platform_int(indices))
            else:
                index_array = self.index_array
            # per-group indexer computes bounds in group-local coordinates
            indexer = self.window_indexer(
                index_array=index_array,
                window_size=self.window_size,
                **self.indexer_kwargs,
            )
            start, end = indexer.get_window_bounds(
                len(indices), min_periods, center, closed, step
            )
            start = start.astype(np.int64)
            end = end.astype(np.int64)
            assert len(start) == len(
                end
            ), "these should be equal in length from get_window_bounds"
            # Cannot use groupby_indices as they might not be monotonic with the object
            # we're rolling over
            window_indices = np.arange(
                window_indices_start, window_indices_start + len(indices)
            )
            window_indices_start += len(indices)
            # Extend as we'll be slicing window like [start, end)
            window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
                np.int64, copy=False
            )
            # translate group-local bounds into global positions
            start_arrays.append(window_indices.take(ensure_platform_int(start)))
            end_arrays.append(window_indices.take(ensure_platform_int(end)))
        if len(start_arrays) == 0:
            return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
        start = np.concatenate(start_arrays)
        end = np.concatenate(end_arrays)
        return start, end
439
+
440
+
441
class ExponentialMovingWindowIndexer(BaseIndexer):
    """Calculate ewm window bounds (the entire window)"""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # A single window spanning all values.
        start = np.array([0], dtype=np.int64)
        end = np.array([num_values], dtype=np.int64)
        return start, end
videollama2/lib/python3.10/site-packages/pandas/core/indexers/utils.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Low-dependency indexing utilities.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ )
10
+
11
+ import numpy as np
12
+
13
+ from pandas._libs import lib
14
+
15
+ from pandas.core.dtypes.common import (
16
+ is_array_like,
17
+ is_bool_dtype,
18
+ is_integer,
19
+ is_integer_dtype,
20
+ is_list_like,
21
+ )
22
+ from pandas.core.dtypes.dtypes import ExtensionDtype
23
+ from pandas.core.dtypes.generic import (
24
+ ABCIndex,
25
+ ABCSeries,
26
+ )
27
+
28
+ if TYPE_CHECKING:
29
+ from pandas._typing import AnyArrayLike
30
+
31
+ from pandas.core.frame import DataFrame
32
+ from pandas.core.indexes.base import Index
33
+
34
+ # -----------------------------------------------------------
35
+ # Indexer Identification
36
+
37
+
38
+ def is_valid_positional_slice(slc: slice) -> bool:
39
+ """
40
+ Check if a slice object can be interpreted as a positional indexer.
41
+
42
+ Parameters
43
+ ----------
44
+ slc : slice
45
+
46
+ Returns
47
+ -------
48
+ bool
49
+
50
+ Notes
51
+ -----
52
+ A valid positional slice may also be interpreted as a label-based slice
53
+ depending on the index being sliced.
54
+ """
55
+ return (
56
+ lib.is_int_or_none(slc.start)
57
+ and lib.is_int_or_none(slc.stop)
58
+ and lib.is_int_or_none(slc.step)
59
+ )
60
+
61
+
62
def is_list_like_indexer(key) -> bool:
    """
    Check if we have a list-like indexer that is *not* a NamedTuple.

    Parameters
    ----------
    key : object

    Returns
    -------
    bool
    """
    if not is_list_like(key):
        return False
    # NamedTuples (tuple subclasses) act as scalar-like label indexers,
    # so they are excluded; plain tuples still count as list-like here.
    is_named_tuple = isinstance(key, tuple) and type(key) is not tuple
    return not is_named_tuple
76
+
77
+
78
def is_scalar_indexer(indexer, ndim: int) -> bool:
    """
    Return True if we are all scalar indexers.

    Parameters
    ----------
    indexer : object
    ndim : int
        Number of dimensions in the object being indexed.

    Returns
    -------
    bool
    """
    if ndim == 1 and is_integer(indexer):
        # GH37748: allow indexer to be an integer for Series
        return True
    # Otherwise we need one integer per dimension, packed in a tuple.
    if not isinstance(indexer, tuple) or len(indexer) != ndim:
        return False
    return all(is_integer(part) for part in indexer)
98
+
99
+
100
def is_empty_indexer(indexer) -> bool:
    """
    Check if we have an empty indexer.

    Parameters
    ----------
    indexer : object

    Returns
    -------
    bool
    """
    # A zero-length list-like is trivially empty.
    if is_list_like(indexer) and not len(indexer):
        return True
    # For tuple indexers, any empty ndarray component empties the selection.
    parts = indexer if isinstance(indexer, tuple) else (indexer,)
    return any(isinstance(part, np.ndarray) and len(part) == 0 for part in parts)
117
+
118
+
119
+ # -----------------------------------------------------------
120
+ # Indexer Validation
121
+
122
+
123
def check_setitem_lengths(indexer, value, values) -> bool:
    """
    Validate that value and indexer are the same length.

    A special-case is allowed for when the indexer is a boolean array
    and the number of true values equals the length of ``value``. In
    this case, no exception is raised.

    Parameters
    ----------
    indexer : sequence
        Key for the setitem.
    value : array-like
        Value for the setitem.
    values : array-like
        Values being set into.

    Returns
    -------
    bool
        Whether this is an empty listlike setting which is a no-op.

    Raises
    ------
    ValueError
        When the indexer is an ndarray or list and the lengths don't match.
    """
    no_op = False

    if isinstance(indexer, (np.ndarray, list)):
        # We can ignore other listlikes because they are either
        # a) not necessarily 1-D indexers, e.g. tuple
        # b) boolean indexers e.g. BoolArray
        if is_list_like(value):
            # Length checks only apply to 1-D targets; for 2-D, value is
            # applied row-wise/broadcast, so mismatched lengths are allowed.
            if len(indexer) != len(value) and values.ndim == 1:
                # boolean with truth values == len of the value is ok too
                if isinstance(indexer, list):
                    # coerce so we can inspect dtype/sum below
                    indexer = np.array(indexer)
                if not (
                    isinstance(indexer, np.ndarray)
                    and indexer.dtype == np.bool_
                    and indexer.sum() == len(value)
                ):
                    raise ValueError(
                        "cannot set using a list-like indexer "
                        "with a different length than the value"
                    )
            # An empty indexer selects nothing: caller may skip the setitem.
            if not len(indexer):
                no_op = True

    elif isinstance(indexer, slice):
        if is_list_like(value):
            if len(value) != length_of_indexer(indexer, values) and values.ndim == 1:
                # In case of two dimensional value is used row-wise and broadcasted
                raise ValueError(
                    "cannot set using a slice indexer with a "
                    "different length than the value"
                )
            # Empty value with a slice indexer is likewise a no-op.
            if not len(value):
                no_op = True

    return no_op
185
+
186
+
187
def validate_indices(indices: np.ndarray, n: int) -> None:
    """
    Perform bounds-checking for an indexer.

    -1 is allowed for indicating missing values.

    Parameters
    ----------
    indices : ndarray
    n : int
        Length of the array being indexed.

    Raises
    ------
    ValueError
        If any entry is below -1.
    IndexError
        If any entry is >= n.

    Examples
    --------
    >>> validate_indices(np.array([1, 2]), 3)  # OK

    >>> validate_indices(np.array([-1, -1]), 0)  # OK
    """
    if not len(indices):
        # Nothing to check for an empty indexer.
        return

    smallest = indices.min()
    if smallest < -1:
        raise ValueError(
            f"'indices' contains values less than allowed ({smallest} < -1)"
        )

    if indices.max() >= n:
        raise IndexError("indices are out-of-bounds")
233
+
234
+
235
+ # -----------------------------------------------------------
236
+ # Indexer Conversion
237
+
238
+
239
def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray:
    """
    Attempt to convert indices into valid, positive indices.

    If we have negative indices, translate to positive here.
    If we have indices that are out-of-bounds, raise an IndexError.

    Parameters
    ----------
    indices : array-like
        Array of indices that we are to convert.
    n : int
        Number of elements in the array that we are indexing.
    verify : bool, default True
        Check that all entries are between 0 and n - 1, inclusive.

    Returns
    -------
    array-like
        An array-like of positive indices that correspond to the ones
        that were passed in initially to this function.

    Raises
    ------
    IndexError
        One of the converted indices either exceeded the number of
        elements (specified by `n`), or was still negative.
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if len(indices) == 0:
            # np.array([]) defaults to float64, which cannot be used for
            # indexing -- return a well-typed empty intp array instead.
            return np.empty(0, dtype=np.intp)

    negative = indices < 0
    if negative.any():
        # Copy before mutating so the caller's array is left untouched.
        indices = indices.copy()
        indices[negative] += n

    if verify:
        out_of_bounds = (indices >= n) | (indices < 0)
        if out_of_bounds.any():
            raise IndexError("indices are out-of-bounds")
    return indices
284
+
285
+
286
+ # -----------------------------------------------------------
287
+ # Unsorted
288
+
289
+
290
def length_of_indexer(indexer, target=None) -> int:
    """
    Return the expected length of target[indexer].

    Parameters
    ----------
    indexer : object
        Slice, array-like, range, or scalar indexer.
    target : sequence, optional
        Object being indexed; required to resolve slice bounds.

    Returns
    -------
    int

    Raises
    ------
    AssertionError
        If the length cannot be determined from `indexer`.
    """
    if target is not None and isinstance(indexer, slice):
        target_len = len(target)
        start = indexer.start
        stop = indexer.stop
        step = indexer.step
        if start is None:
            start = 0
        elif start < 0:
            start += target_len
        if stop is None or stop > target_len:
            stop = target_len
        elif stop < 0:
            stop += target_len
        if step is None:
            step = 1
        elif step < 0:
            # Walk the reversed slice [stop+1, start+1) with a positive step.
            # NOTE(review): with None bounds and a negative step (e.g.
            # [::-1]) this formula does not match Python slicing -- confirm
            # callers never pass such slices before relying on it.
            start, stop = stop + 1, start + 1
            step = -step
        return (stop - start + step - 1) // step
    elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)):
        if isinstance(indexer, list):
            indexer = np.array(indexer)

        if indexer.dtype == bool:
            # GH#25774 a boolean mask selects its True entries
            return indexer.sum()
        return len(indexer)
    elif isinstance(indexer, range):
        # BUG FIX: (stop - start) // step undercounts when the span is not an
        # exact multiple of step, e.g. range(0, 5, 2) has 3 elements, not 2.
        # len() on a range is exact and O(1).
        return len(indexer)
    elif not is_list_like_indexer(indexer):
        # scalar indexer selects a single element
        return 1
    raise AssertionError("cannot find the length of the indexer")
330
+
331
+
332
def disallow_ndim_indexing(result) -> None:
    """
    Helper function to disallow multi-dimensional indexing on 1D Series/Index.

    GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
    and keep an index, so we used to return ndarray, which was deprecated
    in GH#30588.
    """
    if np.ndim(result) <= 1:
        # 0-D and 1-D results are fine.
        return
    raise ValueError(
        "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer "
        "supported. Convert to a numpy array before indexing instead."
    )
345
+
346
+
347
def unpack_1tuple(tup):
    """
    If we have a length-1 tuple/list that contains a slice, unpack to just
    the slice.

    Notes
    -----
    The list case is deprecated.
    """
    # Anything other than a 1-element container holding a slice passes
    # through untouched.
    if len(tup) != 1 or not isinstance(tup[0], slice):
        return tup

    # if we don't have a MultiIndex, we may still be able to handle
    # a 1-tuple. see test_1tuple_without_multiindex
    if isinstance(tup, list):
        # GH#31299
        raise ValueError(
            "Indexing with a single-item list containing a "
            "slice is not allowed. Pass a tuple instead.",
        )

    return tup[0]
369
+
370
+
371
def check_key_length(columns: Index, key, value: DataFrame) -> None:
    """
    Checks if a key used as indexer has the same length as the columns it is
    associated with.

    Parameters
    ----------
    columns : Index The columns of the DataFrame to index.
    key : A list-like of keys to index with.
    value : DataFrame The value to set for the keys.

    Raises
    ------
    ValueError: If the length of key is not equal to the number of columns in value
        or if the number of columns referenced by key is not equal to number
        of columns.
    """
    if columns.is_unique:
        if len(value.columns) != len(key):
            raise ValueError("Columns must be same length as key")
        return
    # Missing keys in columns are represented as -1
    matched = columns.get_indexer_non_unique(key)[0]
    if len(matched) != len(value.columns):
        raise ValueError("Columns must be same length as key")
395
+
396
+
397
def unpack_tuple_and_ellipses(item: tuple):
    """
    Possibly unpack arr[..., n] to arr[n]
    """
    if len(item) <= 1:
        # nothing to strip; 1-tuples and empty tuples pass through as-is
        return item

    # Note: we are assuming this indexing is being done on a 1D arraylike
    if item[0] is Ellipsis:
        item = item[1:]
    elif item[-1] is Ellipsis:
        item = item[:-1]

    if len(item) > 1:
        raise IndexError("too many indices for array.")

    return item[0]
413
+
414
+
415
+ # -----------------------------------------------------------
416
+ # Public indexer validation
417
+
418
+
419
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
    """
    Check if `indexer` is a valid array indexer for `array`.

    For a boolean mask, `array` and `indexer` are checked to have the same
    length. The dtype is validated, and if it is an integer or boolean
    ExtensionArray, it is checked if there are missing values present, and
    it is converted to the appropriate numpy array. Other dtypes will raise
    an error.

    Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
    through as is.

    Parameters
    ----------
    array : array-like
        The array that is being indexed (only used for the length).
    indexer : array-like or list-like
        The array-like that's used to index. List-like input that is not yet
        a numpy array or an ExtensionArray is converted to one. Other input
        types are passed through as is.

    Returns
    -------
    numpy.ndarray
        The validated indexer as a numpy array that can be used to index.

    Raises
    ------
    IndexError
        When the lengths don't match.
    ValueError
        When `indexer` cannot be converted to a numpy ndarray to index
        (e.g. presence of missing values).

    See Also
    --------
    api.types.is_bool_dtype : Check if `key` is of boolean dtype.
    """
    from pandas.core.construction import array as pd_array

    # whatever is not an array-like is returned as-is (possible valid array
    # indexers that are not array-like: integer, slice, Ellipsis, None)
    # In this context, tuples are not considered as array-like, as they have
    # a specific meaning in indexing (multi-dimensional indexing)
    if not is_list_like(indexer) or isinstance(indexer, tuple):
        return indexer

    # convert list-likes to array
    if not is_array_like(indexer):
        indexer = pd_array(indexer)
        if len(indexer) == 0:
            # empty list is converted to float array by pd.array
            indexer = np.array([], dtype=np.intp)

    dtype = indexer.dtype
    if is_bool_dtype(dtype):
        if isinstance(dtype, ExtensionDtype):
            # masked boolean EA: NA entries become False
            indexer = indexer.to_numpy(dtype=bool, na_value=False)
        else:
            indexer = np.asarray(indexer, dtype=bool)

        # GH26658 boolean masks must match the indexed length exactly
        if len(indexer) != len(array):
            raise IndexError(
                f"Boolean index has wrong length: "
                f"{len(indexer)} instead of {len(array)}"
            )
        return indexer

    if is_integer_dtype(dtype):
        try:
            return np.asarray(indexer, dtype=np.intp)
        except ValueError as err:
            # NA in an integer EA cannot be represented as intp
            raise ValueError(
                "Cannot index with an integer indexer containing NA values"
            ) from err

    raise IndexError("arrays used as indices must be of integer or boolean type")
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/read_stencil.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/sRGB_formats.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/blend_equation_advanced.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''OpenGL extension NV.blend_equation_advanced
2
+
3
+ This module customises the behaviour of the
4
+ OpenGL.raw.GLES2.NV.blend_equation_advanced to provide a more
5
+ Python-friendly API
6
+
7
+ The official definition of this extension is available here:
8
+ http://www.opengl.org/registry/specs/NV/blend_equation_advanced.txt
9
+ '''
10
+ from OpenGL import platform, constant, arrays
11
+ from OpenGL import extensions, wrapper
12
+ import ctypes
13
+ from OpenGL.raw.GLES2 import _types, _glgets
14
+ from OpenGL.raw.GLES2.NV.blend_equation_advanced import *
15
+ from OpenGL.raw.GLES2.NV.blend_equation_advanced import _EXTENSION_NAME
16
+
17
def glInitBlendEquationAdvancedNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
21
+
22
+
23
+ ### END AUTOGENERATED SECTION
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/coverage_sample.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''OpenGL extension NV.coverage_sample
2
+
3
+ This module customises the behaviour of the
4
+ OpenGL.raw.GLES2.NV.coverage_sample to provide a more
5
+ Python-friendly API
6
+
7
+ The official definition of this extension is available here:
8
+ http://www.opengl.org/registry/specs/NV/coverage_sample.txt
9
+ '''
10
+ from OpenGL import platform, constant, arrays
11
+ from OpenGL import extensions, wrapper
12
+ import ctypes
13
+ from OpenGL.raw.GLES2 import _types, _glgets
14
+ from OpenGL.raw.GLES2.NV.coverage_sample import *
15
+ from OpenGL.raw.GLES2.NV.coverage_sample import _EXTENSION_NAME
16
+
17
def glInitCoverageSampleNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
21
+
22
+
23
+ ### END AUTOGENERATED SECTION
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/draw_buffers.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''OpenGL extension NV.draw_buffers
2
+
3
+ This module customises the behaviour of the
4
+ OpenGL.raw.GLES2.NV.draw_buffers to provide a more
5
+ Python-friendly API
6
+
7
+ The official definition of this extension is available here:
8
+ http://www.opengl.org/registry/specs/NV/draw_buffers.txt
9
+ '''
10
+ from OpenGL import platform, constant, arrays
11
+ from OpenGL import extensions, wrapper
12
+ import ctypes
13
+ from OpenGL.raw.GLES2 import _types, _glgets
14
+ from OpenGL.raw.GLES2.NV.draw_buffers import *
15
+ from OpenGL.raw.GLES2.NV.draw_buffers import _EXTENSION_NAME
16
+
17
def glInitDrawBuffersNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
21
+
22
# INPUT glDrawBuffersNV.bufs size not checked against n
# Re-wrap the raw entry point so PyOpenGL infers the size of the 'bufs'
# array argument from the sequence passed in (None = unchecked length).
# NOTE(review): the wrapper does not validate len(bufs) against the 'n'
# parameter -- callers must keep them consistent.
glDrawBuffersNV=wrapper.wrapper(glDrawBuffersNV).setInputArraySize(
    'bufs', None
)
26
+ ### END AUTOGENERATED SECTION
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/explicit_attrib_location.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''OpenGL extension NV.explicit_attrib_location
2
+
3
+ This module customises the behaviour of the
4
+ OpenGL.raw.GLES2.NV.explicit_attrib_location to provide a more
5
+ Python-friendly API
6
+
7
+ The official definition of this extension is available here:
8
+ http://www.opengl.org/registry/specs/NV/explicit_attrib_location.txt
9
+ '''
10
+ from OpenGL import platform, constant, arrays
11
+ from OpenGL import extensions, wrapper
12
+ import ctypes
13
+ from OpenGL.raw.GLES2 import _types, _glgets
14
+ from OpenGL.raw.GLES2.NV.explicit_attrib_location import *
15
+ from OpenGL.raw.GLES2.NV.explicit_attrib_location import _EXTENSION_NAME
16
+
17
def glInitExplicitAttribLocationNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
21
+
22
+
23
+ ### END AUTOGENERATED SECTION
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/framebuffer_multisample.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''OpenGL extension NV.framebuffer_multisample
2
+
3
+ This module customises the behaviour of the
4
+ OpenGL.raw.GLES2.NV.framebuffer_multisample to provide a more
5
+ Python-friendly API
6
+
7
+ The official definition of this extension is available here:
8
+ http://www.opengl.org/registry/specs/NV/framebuffer_multisample.txt
9
+ '''
10
+ from OpenGL import platform, constant, arrays
11
+ from OpenGL import extensions, wrapper
12
+ import ctypes
13
+ from OpenGL.raw.GLES2 import _types, _glgets
14
+ from OpenGL.raw.GLES2.NV.framebuffer_multisample import *
15
+ from OpenGL.raw.GLES2.NV.framebuffer_multisample import _EXTENSION_NAME
16
+
17
def glInitFramebufferMultisampleNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
21
+
22
+
23
+ ### END AUTOGENERATED SECTION
vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/generate_mipmap_sRGB.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''OpenGL extension NV.generate_mipmap_sRGB
2
+
3
+ This module customises the behaviour of the
4
+ OpenGL.raw.GLES2.NV.generate_mipmap_sRGB to provide a more
5
+ Python-friendly API
6
+
7
+ The official definition of this extension is available here:
8
+ http://www.opengl.org/registry/specs/NV/generate_mipmap_sRGB.txt
9
+ '''
10
+ from OpenGL import platform, constant, arrays
11
+ from OpenGL import extensions, wrapper
12
+ import ctypes
13
+ from OpenGL.raw.GLES2 import _types, _glgets
14
+ from OpenGL.raw.GLES2.NV.generate_mipmap_sRGB import *
15
+ from OpenGL.raw.GLES2.NV.generate_mipmap_sRGB import _EXTENSION_NAME
16
+
17
def glInitGenerateMipmapSrgbNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
21
+
22
+
23
+ ### END AUTOGENERATED SECTION